Dataset schema (one record per retrieval-training example):

- query: string, 9 to 9.05k characters
- document: string, 10 to 222k characters (the positive passage for the query)
- metadata: dict (training-objective flags)
- negatives: sequence of 30 strings (hard-negative documents)
- negative_scores: sequence of 30 score strings, one per negative
- document_score: string, 4 to 10 characters (score of the positive document)
- document_rank: string, one of 2 values
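Assuming the dataset is hosted on the Hugging Face Hub, a record can be inspected with the datasets library as sketched below. The repository id is a placeholder assumption, since the dataset's actual Hub name does not appear in this dump.

# Hypothetical loading sketch; "user/code-retrieval-triplets" is a placeholder repo id.
from datasets import load_dataset

ds = load_dataset("user/code-retrieval-triplets", split="train")
row = ds[0]
print(row["query"])               # the search query
print(row["document"][:200])      # start of the positive document
print(len(row["negatives"]))      # 30 hard negatives per record

The records below list their fields in the schema order given above.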
Questions Card Update Form
def admin_card_update(card_id):
    mongo_collection = mongo_database["questions"]
    card = mongo_collection.find_one({"id": card_id})
    return render_template(
        "admin_card_update.html",
        card=card,
        datetime=date_today.strftime("%x"),
        admin_logged=session.get('logged_in'),
        admin_session=session
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _edit_question(request, question):\n latest_revision = question.get_latest_revision()\n preview = None\n revision_form = None\n if request.method == 'POST':\n if 'select_revision' in request.POST:\n # The user submitted to change the revision to start editing from\n revision_form = RevisionForm(question, latest_revision, request.POST)\n if revision_form.is_valid():\n # Replace Question details with those from the selected revision\n form = EditQuestionForm(question,\n QuestionRevision.objects.get(question=question,\n revision=revision_form.cleaned_data['revision']))\n else:\n # Make sure we keep a hold of the user's other input, even\n # though they appear to be messing about.\n form = EditQuestionForm(question, latest_revision, request.POST)\n else:\n # Always check modifications against the latest revision\n form = EditQuestionForm(question, latest_revision, request.POST)\n if form.is_valid():\n html = sanitize_html(\n markdowner.convert(form.cleaned_data['text']))\n if 'preview' in request.POST:\n # The user submitted to preview the formatted question\n preview = mark_safe(html)\n elif 'submit' in request.POST:\n if form.has_changed():\n edited_at = datetime.datetime.now()\n tags_changed = (latest_revision.tagnames !=\n form.cleaned_data['tags'])\n tags_updated = False\n # Update the Question itself\n updated_fields = {\n 'title': form.cleaned_data['title'],\n 'last_edited_at': edited_at,\n 'last_edited_by': request.user,\n 'last_activity_at': edited_at,\n 'last_activity_by': request.user,\n 'tagnames': form.cleaned_data['tags'],\n 'summary': strip_tags(html)[:180],\n 'html': html,\n }\n if ('wiki' in form.cleaned_data and\n form.cleaned_data['wiki']):\n updated_fields['wiki'] = True\n updated_fields['wikified_at'] = edited_at\n Question.objects.filter(\n id=question.id).update(**updated_fields)\n # Update the Question's tag associations\n if tags_changed:\n tags_updated = Question.objects.update_tags(\n question, question.tagnames, request.user)\n # Create a new revision\n revision = QuestionRevision(\n question = question,\n title = form.cleaned_data['title'],\n author = request.user,\n revised_at = edited_at,\n tagnames = form.cleaned_data['tags'],\n text = form.cleaned_data['text']\n )\n if form.cleaned_data['summary']:\n revision.summary = form.cleaned_data['summary']\n else:\n revision.summary = \\\n diff.generate_question_revision_summary(\n latest_revision, revision,\n ('wiki' in updated_fields))\n revision.save()\n # TODO 5 body edits by the author = automatic wiki mode\n # TODO 4 individual editors = automatic wiki mode\n # TODO Badges related to Tag usage\n # TODO Badges related to editing Questions\n return HttpResponseRedirect(question.get_absolute_url())\n else:\n if 'revision' in request.GET:\n revision_form = RevisionForm(question, latest_revision, request.GET)\n if revision_form.is_valid():\n # Replace Question details with those from the selected revision\n form = EditQuestionForm(question,\n QuestionRevision.objects.get(question=question,\n revision=revision_form.cleaned_data['revision']))\n else:\n revision_form = RevisionForm(question, latest_revision)\n form = EditQuestionForm(question, latest_revision)\n if revision_form is None:\n # We're about to redisplay after a POST where we didn't care which\n # revision was selected - make sure the revision the user started from\n # is still selected on redisplay.\n revision_form = RevisionForm(question, latest_revision, request.POST)\n return render_to_response('edit_question.html', {\n 'title': u'Edit Question',\n 
'question': question,\n 'revision_form': revision_form,\n 'form': form,\n 'preview': preview,\n }, context_instance=RequestContext(request))", "def edit_question(request, slug):\n\n form_data = question.objects.get(id=slug)\n question_form = UserQuestionForm(instance=form_data)\n\n if request.method == \"POST\":\n question_form = UserQuestionForm(request.POST, instance=form_data)\n\n if question_form.is_valid():\n question_form.save()\n\n messages.success(request, \"Question edited successfully\")\n\n return redirect('profile')\n\n else:\n question_form = UserQuestionForm(instance=form_data)\n\n return render(request, 'question.html', {\"question_form\": question_form})", "def editqn(qnID):\n if not current_user.check_educator():\n return render_template('errors/error403.html'), 403\n qn = validate_qn_link(qnID, current_user.id)\n form = QuestionForm()\n\n if request.method == 'GET':\n topicID = qn.topicID if qn.topicID else 0\n form.topic.data = topicID\n form.qn.data = qn.question\n options = [option.option for option in qn.options]\n form.op1.data, form.op2.data, form.op3.data, form.op4.data = options\n for i in range(len(options)):\n if qn.options[i].id == qn.answerID:\n form.corrOp.data = i + 1\n break\n\n if form.validate_on_submit():\n #Commit inputs to database\n options = (form.op1.data, form.op2.data, form.op3.data, form.op4.data)\n edit_question(qn, form.qn.data, options, form.corrOp.data, form.topic.data)\n flash('Question Edited Successfully!')\n return redirect(url_for('main.dashboard'))\n\n return render_template('quiz/createqn.html', title=' | Create Quiz', form=form, edit=True)", "def section_questions(request, index_card_id, section_id):\n service_inf_elements = get_elements_by_component_name('Service Infrastructure')\n hard_inf_elements = get_elements_by_component_name('Hard Infrastructure')\n built_env_elements = get_elements_by_component_name('Built Environment')\n fields_to_show = \\\n ('statement','answer_type', 'answer_short', 'answer_long', 'comments')\n read_only_fields = ('comments')\n\n QuestionFormSet = modelformset_factory(Question, max_num=1, exclude=[])\n index_card = IndexCard.objects.get(id=index_card_id)\n section = index_card.section_set.get(id = section_id)\n\n if request.method == 'POST':\n formset = QuestionFormSet(request.POST, request.FILES)\n if formset.is_valid():\n formset.save()\n return render_to_response(\"crppindexcard/index.html\",\n {'index_card': index_card,\n \"service_inf_elements\": service_inf_elements,\n 'hard_inf_elements':hard_inf_elements,'built_env_elements':built_env_elements},\\\n context_instance=RequestContext(request))\n else:\n if format(len(formset.errors) > 0):\n num_errors = len(formset.errors[0])\n set_form_hidden_fields(formset, fields_to_show)\n set_form_readonly_fields(formset, read_only_fields)\n set_form_country_select(formset)\n else:\n query_set = Question.objects.filter(section_id = section_id).order_by('id')\n formset = QuestionFormSet(queryset=query_set)\n set_form_readonly_fields(formset, read_only_fields)\n set_form_hidden_fields(formset, fields_to_show)\n set_form_country_select(formset)\n\n return render_to_response(\"crppindexcard/section_questions.html\", {\n \"formset\": formset,\n \"index_card\": index_card,\n \"section\": section,\n }, context_instance=RequestContext(request))", "def edit_question():\r\n global new, point, edit_index,edit_mode\r\n add_question_Frame.grid_forget()\r\n quiz_frame.grid_forget()\r\n one_person_quiz_frame.grid_forget()\r\n question_list_frame.grid_forget()\r\n 
search_question_frame.grid_forget()\r\n\r\n select_question_frame.grid(row=0, column=0, rowspan=10, columnspan=10, sticky=N + E + S + W)\r\n\r\n add_question()", "def element_questions(request, index_card_id, element_id, component_name):\n service_inf_elements = get_elements_by_component_name('Service Infrastructure')\n hard_inf_elements = get_elements_by_component_name('Hard Infrastructure')\n built_env_elements = get_elements_by_component_name('Built Environment')\n fields_to_show = ('statement','explanation','answer','mov','additional_information')\n\n QuestionFormSet = modelformset_factory(ElementQuestionCharField, max_num=1, form=MyForm)\n index_card = IndexCard.objects.get(id=index_card_id)\n element = Element.objects.get(id=element_id)\n\n if request.method == 'POST':\n formset = QuestionFormSet(request.POST, request.FILES)\n if formset.is_valid():\n formset.save()\n return render_to_response(\"crppindexcard/index.html\",\n {'index_card': index_card, 'service_inf_elements': service_inf_elements, \\\n 'hard_inf_elements':hard_inf_elements, 'built_env_elements': built_env_elements}\\\n , context_instance=RequestContext(request))\n else:\n if format(len(formset.errors) > 0):\n num_errors = len(formset.errors[0])\n set_form_hidden_fields(formset, fields_to_show)\n else:\n query_set = ElementQuestionCharField.objects.all().filter(element_id=element.id,index_card_id=index_card.id).order_by('id')\n formset = QuestionFormSet(queryset = query_set)\n set_form_hidden_fields(formset, fields_to_show)\n\n return render_to_response(\"crppindexcard/element_questions.html\",\n dict(formset=formset, index_card=index_card, element=element, \\\n constants=crppindexcard.constants, component_name=component_name), \\\n context_instance=RequestContext(request))", "def competence_questions(request, index_card_id, index_card_competence_category_id):\n service_inf_elements = get_elements_by_component_name('Service Infrastructure')\n hard_inf_elements = get_elements_by_component_name('Hard Infrastructure')\n built_env_elements = get_elements_by_component_name('Built Environment')\n fields_to_apply = ('owner') #,'operator','competences','role_in_ec_plan')\n\n QuestionFormSet = modelformset_factory(CompetenceQuestion, max_num=1, exclude=[])\n index_card = IndexCard.objects.get(id=index_card_id)\n index_card_competence_category = IndexCardCompetenceCategory.objects.get(id=index_card_competence_category_id)\n\n\n if request.method == 'POST':\n formset = QuestionFormSet(request.POST, request.FILES)\n if formset.is_valid():\n formset.save()\n return render_to_response(\"crppindexcard/index.html\",\n {'index_card': index_card, 'service_inf_elements': service_inf_elements, \\\n 'hard_inf_elements':hard_inf_elements, 'built_env_elements':built_env_elements},\\\n context_instance=RequestContext(request))\n else:\n if format(len(formset.errors) > 0):\n num_errors = len(formset.errors[0])\n else:\n query_set = CompetenceQuestion.objects.filter(index_card_competence_category_id=index_card_competence_category_id).order_by('id')\n formset = QuestionFormSet(queryset = query_set)\n\n return render_to_response(\"crppindexcard/competence_questions.html\",\n dict(formset=formset, index_card=index_card, constants=crppindexcard.constants, \\\n index_card_competence_category=index_card_competence_category,\n service_inf_elements=service_inf_elements, hard_inf_elements=hard_inf_elements), \\\n context_instance=RequestContext(request))", "def testQuestionField(self):\n sdq1 = getattr(self.s1, 'sdq1')\n self.app.REQUEST.form['showYMD'] 
= False\n self.app.REQUEST.form['showHM'] = False\n app = self.app\n dummy_controller_state = ControllerState(\n id='base_edit',\n context=sdq1,\n button='submit',\n status='success',\n errors={},\n next_action=None,)\n controller = self.portal.portal_form_controller\n controller_state = controller.validate(dummy_controller_state, app.REQUEST, ['validate_base',])\n errors = controller_state.getErrors()\n errors = sdq1.post_validate(self.app.REQUEST, errors)\n assert errors != {}, \"Validation error not raised\"\n assert errors.has_key('showYMD')\n assert errors.has_key('showHM')", "def add_question():\r\n global file, add_question_Frame, question_list_frame, new, edit_mode,edit_index\r\n # Forgetting frame so it doesn't interfere\r\n add_question_Frame.grid_forget()\r\n quiz_frame.grid_forget()\r\n one_person_quiz_frame.grid_forget()\r\n question_list_frame.grid_forget()\r\n select_question_frame.grid_forget()\r\n search_question_frame.grid_forget()\r\n\r\n add_question_Frame.grid(row=0, column=0, rowspan=10, columnspan=10, sticky=N + E + S + W)\r\n\r\n # New question entry form\r\n newQuestion = Label(add_question_Frame, text='Enter a new Question ')\r\n newQuestion.grid(row=0, column=0)\r\n new_question_entry = Entry(add_question_Frame, textvariable=new,width=80)\r\n new.get()\r\n new_question_entry.grid(row=0, column=1, columnspan=1)\r\n\r\n # answer choice one entry form\r\n answer1 = Label(add_question_Frame, text='Enter the correct answer ')\r\n answer1.grid(row=1, column=0)\r\n answer1_entry = Entry(add_question_Frame, textvariable=choice1,width=80)\r\n choice1.get()\r\n answer1_entry.grid(row=1, column=1)\r\n\r\n # answer choice two entry form\r\n answer2 = Label(add_question_Frame, text='Enter an incorrect answer ')\r\n answer2.grid(row=2, column=0)\r\n answer2_entry = Entry(add_question_Frame, textvariable=choice2,width=80)\r\n choice2.get()\r\n answer2_entry.grid(row=2, column=1)\r\n\r\n # answer choice three entry form\r\n answer3 = Label(add_question_Frame, text='Enter an incorrect answer ')\r\n answer3.grid(row=3, column=0)\r\n answer3_entry = Entry(add_question_Frame, textvariable=choice3,width=80)\r\n choice3.get()\r\n answer3_entry.grid(row=3, column=1)\r\n\r\n # answer choice four entry form\r\n answer4 = Label(add_question_Frame, text='Enter an incorrect answer ')\r\n answer4.grid(row=4, column=0)\r\n answer4_entry = Entry(add_question_Frame, textvariable=choice4,width=80)\r\n choice4.get()\r\n answer4_entry.grid(row=4, column=1)\r\n\r\n # point entry form\r\n points = Label(add_question_Frame, text='Enter point values')\r\n points.grid(row=0, column=3)\r\n points_entry = Entry(add_question_Frame, textvariable=point)\r\n point.get()\r\n points_entry.grid(row=0, column=4)\r\n\r\n # correct feedback entry form\r\n correctFeed = Label(add_question_Frame, text='Enter correct feedback')\r\n correctFeed.grid(row=5, column=0)\r\n correctFeed_entry = Entry(add_question_Frame, textvariable=correctFeedback,width=80)\r\n correctFeedback.get()\r\n correctFeed_entry.grid(row=5, column=1)\r\n\r\n # incorrect feedback entry form\r\n incorrect = Label(add_question_Frame, text='Enter witty incorrect feedback')\r\n incorrect.grid(row=6, column=0)\r\n incorrect_entry = Entry(add_question_Frame, textvariable=incorrectFeed,width=80)\r\n incorrectFeed.get()\r\n incorrect_entry.grid(row=6, column=1)\r\n\r\n # button to submit all the forms\r\n submit = Button(add_question_Frame, command=save_data, text='Submit')\r\n submit.grid(row=7, column=3)\r\n\r\n with open('Question_pool.txt', 'r') as 
fp:\r\n line = fp.readline()\r\n while line:\r\n list_box.insert(END,line.split(',')[0])\r\n line = fp.readline()\r\n list_box.grid()\r\n Scrollbar(add_question_Frame,orient=\"vertical\")", "def update(challenge, request):\n challenge.name = request.form['name']\n challenge.description = request.form['description']\n challenge.value = int(request.form.get('value', 0)) if request.form.get('value', 0) else 0\n challenge.max_attempts = int(request.form.get('max_attempts', 0)) if request.form.get('max_attempts', 0) else 0\n challenge.unlock_at = int(request.form.get('unlock_at', 0)) if request.form.get('unlock_at', 0) else 0\n challenge.category = request.form['category']\n challenge.hidden = 'hidden' in request.form\n db.session.commit()\n db.session.close()", "def form(update, context):\n update.message.reply_text(\"\"\"Fill out the form 👇 👇 👇\n https://forms.gle/VREhdtCNqJ6rZNfQ7\"\"\")", "def confirm_new_card(update, context):\n query = update.callback_query\n if query.message.reply_to_message:\n user = query.message.reply_to_message.from_user\n else:\n user = query.message.chat\n bot = context.bot\n CURRENT_USER = USERS[user.username]\n CURRENT_CONTEXT = process_card_value(query.data, CURRENT_USER, False, True)\n print(\"COnfirm New data\")\n message = f'Round: {CURRENT_CONTEXT[\"round\"]} ({CURRENT_CONTEXT[\"username\"]}) \\nDealers Card: {CURRENT_CONTEXT[\"dealer_card\"]}\\nYour Cards: {CURRENT_CONTEXT[\"player_cards\"]} \\nYour total: {CURRENT_CONTEXT[\"player_total\"]} \\n\\n'\n message = message + f'You should: *{CURRENT_CONTEXT[\"strategy\"]}* \\n\\n'\n message = message + f'Choose the action you took: '\n keyboard = [\n [inline(CURRENT_USER[\"strategy\"])],\n [inline('Something Else')],\n [inline('New Round')]\n ]\n\n strategy_markup = InlineKeyboardMarkup(keyboard)\n bot.edit_message_text(\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text=message,\n reply_markup=strategy_markup\n )\n\n # Tell ConversationHandler that we're in state `STRATEGY` now\n return STRATEGY", "def form_valid(self, form):\n\n action = self.get_object()\n \n self.request.user.assert_can_edit_action(action)\n\n question = action.question \n\n title = form.cleaned_data['title']\n #theese tags will be replaced to the old ones\n tagnames = form.cleaned_data['tags']\n text = form.cleaned_data['text']\n total_threshold = int(form.cleaned_data['threshold'])\n\n self.request.user.edit_question(\n question = question,\n title = title,\n body_text = text,\n revision_comment = None,\n tags = tagnames,\n wiki = False, \n edit_anonymously = False,\n ) \n\n\n new_categories = form.cleaned_data['category_set']\n old_categories = action.categories\n to_add, to_remove = self.update_values(old_categories, \n new_categories\n )\n action.category_set.add(*to_add)\n action.category_set.remove(*to_remove)\n\n geoname_data = form.cleaned_data['geoname_set']\n old_geonames = action.geonames\n new_geonames = self.get_or_create_geonames(geoname_data)\n to_add, to_remove = self.update_values(old_geonames, \n new_geonames\n )\n action.geoname_set.add(*to_add)\n action.geoname_set.remove(*to_remove)\n\n politician_data = form.cleaned_data['politician_set']\n old_politicians = action.politicians\n new_politicians = self.get_or_create_politicians(politician_data)\n to_add, to_remove = self.update_values(old_politicians, \n new_politicians\n )\n action.politician_set.add(*to_add)\n action.politician_set.remove(*to_remove)\n \n medias = form.cleaned_data['media_set']\n #TODO: Matteo \n #old_medias = 
action.medias\n #new_medias = []\n #to_add, to_remove = self.update_values(old_medias, \n # new_medias\n #)\n #action.media_set.add(*to_add)\n #action.media_set.remove(*to_remove)\n\n #for m2m_attr in (\n # 'geoname_set', \n # 'politician_set',\n # 'media_set'\n #):\n # #Theese attributes should contain Json data\n # m2m_value = form.cleaned_data.get(m2m_attr)\n # if m2m_attr[:-4] == 'geoname':\n # model = Geoname\n # kwargs = {}\n # kwargs['ext_res_type'] = {\n # 'type' : 'location_type',\n # 'name' : 'name'\n # }\n # kwargs['name'] = 'name'\n # elif m2m_attr[:-4] == 'politician':\n # model = Politician\n # kwargs = {}\n # #GET cityreps from locations ids\n # if form.cleaned_data['geoname_set']:\n # cityreps_ids = form.cleaned_data.get('geoname_set')\n # else:\n # cityreps_ids = [long(obj.external_resource.ext_res_id) \n # for obj in action.geonames]\n\n # # here we check that the threshold arrived is equal to\n # # the the sum of the thrershold delta of all the politicians\n # # the metho dwill raise exceptions if necessary\n # #kwargs['charge_ids'] = self.get_politicians_charge_ids(\n # # cityreps_ids,\n # # m2m_value,\n # # get_lookup(MAP_MODEL_SET_TO_CHANNEL['cityrep'])\n # #)\n # if type(m2m_value) != list:\n # m2m_value = [int(elem) for elem in m2m_value.strip('|').split('|')]\n\n # m2m_value_copy = [elem for elem in m2m_value]\n # kwargs['politicians_jsons'] = self.check_threshold(\n # cityreps_ids,\n # m2m_value_copy,\n # total_threshold\n # )\n # kwargs['id_prefix'] = 'content_'\n # elif m2m_attr[:-4] == 'media':\n # kwargs = {}\n # model = Media\n\n # if len(m2m_value) != 0:\n # \"\"\" Here we have to check if there are ExternalResource\n # objects with pk equal to the provided ids.\n # If there are ids that do not match with any ExternalResource\n # object pk, than create them. \n # If the ExternalResource which pks match with some ids was\n # created too time ago, then check the openpolis Json to see\n # if there had been some changes.\n # \n # Finally check if there are Geoname objects linked to the found\n # ExternalResource objects. 
If not, create them.\n # \n # \"\"\"\n # # Values can be overlapping or non overlapping\n # m2m_values_old = getattr(action, m2m_attr).all()\n\n # m2m_values_new = self.get_m2m_values(\n # m2m_attr,\n # m2m_value,\n # model,\n # #ext_res_type={\n # # 'type' : 'location_type',\n # # 'name' : 'name'\n # #}\n # **kwargs\n # )\n\n # to_add, to_remove = self.update_values(m2m_values_old, \n # m2m_values_new\n # )\n\n # getattr(action, m2m_attr).add(*to_add)\n # getattr(action, m2m_attr).remove(*to_remove)\n \n success_url = action.get_absolute_url()\n return views_support.response_redirect(self.request, success_url)", "def createForm(request):\n if request.method == 'POST':\n form = QuestionFormForm(request.POST)\n if form.is_valid():\n #return the uuid so the organization can use that link in the post to connect to the questionform\n formID = form.save().UUID\n #send them the url for the form\n messages.success(request, 'You have made your question form accessible at: ' + request.build_absolute_uri('/post/') + f'apply/{formID}')\n context = {'form': form}\n return render(request, 'scholarship.html', context=context)\n form = QuestionFormForm()\n context = {'form': form}\n return render(request, 'scholarship.html', context=context)", "def submitForm(request,formcode=None):\n\n if formcode:\n #save the answer form response \n if request.method == 'POST':\n answer1 = request.POST.get('answer1', '')\n answer2 = request.POST.get('answer2', '')\n answer3 = request.POST.get('answer3', '')\n user = request.user\n \n #users can only submit one form for each scholarship\n if len(Response.objects.filter(user_id=user, form_id=formcode)) == 0: \n form = Response(answer1=answer1, answer2=answer2, answer3=answer3, user=user, form_id=formcode)\n form.save()\n return render(request,'success.html')\n return render(request, 'error.html')\n \n #Get both the questionform and the postform to display onto the response page\n questionForm = get_object_or_404(Form,UUID=formcode)\n postForm = Post.objects.filter(Q(link__icontains=formcode))[0]\n context = {'form': questionForm, 'UUID': formcode, 'post': postForm}\n return render(request, 'response.html', context=context) \n else:\n return redirect(search)", "def quiz(self, update: Update, context: CallbackContext) -> None:\r\n #questions = [\"1\", \"2\", \"4\", \"20\"]\r\n questions = qa.Question().options\r\n message = update.effective_message.reply_poll(\r\n \"How many eggs do you need for a cake?\", questions, type=Poll.QUIZ, correct_option_id=2\r\n )\r\n # Save some info about the poll the bot_data for later use in receive_quiz_answer\r\n payload = {\r\n message.poll.id: {\r\n \"questions\": questions,\r\n \"chat_id\": update.effective_chat.id, \r\n \"message_id\": message.message_id}\r\n }\r\n context.bot_data.update(payload)", "def question_new_validate():", "def ajaxReply(request):\n if request.method == 'POST':\n question_name = request.POST.get('que', None)\n choice1 = request.POST.get('cho', None)\n choice2 = request.POST.get('cho2', None)\n choice3 = request.POST.get('cho3', None)\n if Question.objects.filter(question_text=question_name).exists():\n return render(request, 'vote/index.html', {\n 'error_message': \"This question already exists.\",\n })\n else: \n question = Question(question_text=question_name, pub_date=timezone.now())\n question.save()\n question = Question.objects.get(question_text=question_name)\n choice = Question.objects.get(pk=question.id)\n choice.choice_set.create(choice_text=choice1, votes=0)\n 
choice.choice_set.create(choice_text=choice2, votes=0)\n choice.choice_set.create(choice_text=choice3, votes=0)\n return HttpResponseRedirect(reverse('vote:detail' ,args=(question.id,)))", "def complete_questionnaire(self):\n logger.info(\"Complete questionnaire.\")\n difficulty = self.driver.find_element(\"id\", \"difficulty\")\n difficulty.value = \"4\"\n engagement = self.driver.find_element(\"id\", \"engagement\")\n engagement.value = \"3\"", "def updateQuestionsSolved(self):\r\n self.questionsCompletedLabel.setText(\"Questions completed: {}\".format(save.getProblemsSolved()))", "def on_submit(self):\n\n if self.question_type == \"programming\":\n database_api.sendAnswers(Cache.get(\"info\", \"token\"),\n Cache.get(\"lect\", \"code\"),\n self.question_no,\n Cache.get(\"info\", \"nick\"),\n self.ids[\"input_code_answer\"].text.replace(\"\\n\",\n \"*[SEAS-SLASH-N]*\"\n )\n )\n\n return True\n elif self.question_type == \"short_answer\":\n database_api.sendAnswers(Cache.get(\"info\", \"token\"),\n Cache.get(\"lect\", \"code\"),\n self.question_no,\n Cache.get(\"info\", \"nick\"),\n self.ids[\"input_short_answer\"].text.replace(\"\\n\",\n \"*[SEAS-SLASH-N]*\"\n )\n )\n\n return True\n elif self.question_type == \"multiple_choice\":\n try:\n students_choice = self.multiple_choice_answer\n except:\n students_choice = \"\"\n\n database_api.sendAnswers(Cache.get(\"info\", \"token\"),\n Cache.get(\"lect\", \"code\"),\n self.question_no,\n Cache.get(\"info\", \"nick\"),\n students_choice\n )\n\n return True\n else:\n return False", "def hazard_questions(request, index_card_id, hazard_id):\n QuestionFormSet = modelformset_factory(HazardAssessmentMatrix, max_num=1, exclude=[])\n index_card = IndexCard.objects.get(id=index_card_id)\n hazard = index_card.hazards.get(id = hazard_id)\n service_inf_elements = get_elements_by_component_name('Service Infrastructure')\n hard_inf_elements = get_elements_by_component_name('Hard Infrastructure')\n built_env_elements = get_elements_by_component_name('Built Environment')\n if request.method == 'POST':\n formset = QuestionFormSet(request.POST, request.FILES)\n if formset.is_valid():\n formset.save()\n return render_to_response(\"crppindexcard/index.html\",\n {'index_card': index_card, 'service_inf_elements': service_inf_elements, \\\n 'hard_inf_elements':hard_inf_elements,'built_env_elements':built_env_elements},\\\n context_instance=RequestContext(request))\n else:\n if format(len(formset.errors) > 0):\n num_errors = len(formset.errors[0])\n else:\n query_set = HazardAssessmentMatrix.objects.filter(index_card_id=index_card_id, hazard_id=hazard_id)\n formset = QuestionFormSet(queryset=query_set)\n\n return render_to_response(\"crppindexcard/hazard_questions.html\",{\n \"formset\":formset, \"index_card\":index_card, \"hazard\":hazard, \\\n 'service_inf_elements':service_inf_elements, 'hard_inf_elements':hard_inf_elements,\n \"constants\":crppindexcard.constants,}, \\\n context_instance=RequestContext(request))", "def new_quiz(update: Update, context: CallbackContext) -> None:\n if not isinstance(context.chat_data, dict):\n raise AssertionError\n if update.effective_chat:\n if context.chat_data.get('question_number', -1) == -1:\n options = ['quiz1', 'quiz2']\n keyboard = [[\n InlineKeyboardButton(i, callback_data=i) for i in options\n ]]\n reply_markup = InlineKeyboardMarkup(keyboard)\n context.chat_data['message'] = context.bot.send_message(\n chat_id=update.effective_chat.id,\n text=\"Choose your quiz. 
(Admin only)\",\n reply_markup=reply_markup)\n else:\n context.chat_data['message'] = context.bot.send_message(\n chat_id=update.effective_chat.id,\n text=\"A quiz is already running, close it first!\")", "def handle_invalid_question_button_callback(\n update: Update, context: CallbackContext\n) -> None:\n update.callback_query.answer()\n update.effective_message.edit_text(\n \"Sorry, I could not process this button click 😕 Please send /n to get a new question.\"\n )", "def question_switch(request, id):\n question = get_object_or_404(Question, pk=id)\n\n # qproposal - endorse part\n categories = Category.objects.filter(name='proposed')\n proposed_cat = categories[0] if categories else None\n if question.category == proposed_cat:\n player = question.proposed_by.get_profile()\n staff_user = request.user\n amount = 0\n for tag in question.tags.all():\n if tag.name == 'qotd':\n amount = QOTD_GOLD\n elif tag.name == 'challenge':\n amount = CHALLENGE_GOLD\n elif tag.name == 'quest':\n amount = QUEST_GOLD\n\n # Question is endorsed\n if not question.endorsed_by:\n question.endorsed_by = staff_user\n question.save()\n scoring.score(player, None, 'bonus-gold', external_id=staff_user.id,\n gold=amount)\n\n # Endorsement is removed from question\n else:\n question.endorsed_by = None\n question.save()\n amount *= -1\n scoring.score(player, None, 'bonus-gold', external_id=staff_user.id,\n gold=amount)\n\n # regular activation of question\n else:\n question.active = not question.active\n question.save()\n\n go_back = request.META.get('HTTP_REFERER', None)\n if not go_back:\n go_back = reverse('wouso.interface.cpanel.views.qpool_home')\n\n return HttpResponseRedirect(go_back)", "async def app_questions(self, ctx: commands.Context):\n app_questions = await self.config.guild(ctx.guild).app_questions.get_raw()\n question_1 = app_questions[\"name\"]\n question_2 = app_questions[\"timezone\"]\n question_3 = app_questions[\"age\"]\n question_4 = app_questions[\"days\"]\n question_5 = app_questions[\"hours\"]\n question_6 = app_questions[\"experience\"]\n question_7 = app_questions[\"reasonforinterest\"]\n question_8 = app_questions[\"question8\"]\n question_9 = app_questions[\"question9\"]\n question_10 = app_questions[\"question10\"]\n question_11 = app_questions[\"question11\"]\n question_12 = app_questions[\"question12\"]\n question_13 = app_questions[\"finalcomments\"]\n\n await ctx.send(\n \"There are 13 questions in this application feature, with a few preloaded already for you.\\nHere is the current configuration:\"\n )\n e = discord.Embed(colour=await ctx.embed_colour())\n e.add_field(\n name=\"Question 1\", value=f\"{question_1}\" if question_1 else \"Not Set\", inline=False\n )\n e.add_field(\n name=\"Question 2\", value=f\"{question_2}\" if question_2 else \"Not Set\", inline=False\n )\n e.add_field(\n name=\"Question 3\", value=f\"{question_3}\" if question_3 else \"Not Set\", inline=False\n )\n e.add_field(\n name=\"Question 4\", value=f\"{question_4}\" if question_4 else \"Not Set\", inline=False\n )\n e.add_field(\n name=\"Question 5\", value=f\"{question_5}\" if question_5 else \"Not Set\", inline=False\n )\n e.add_field(\n name=\"Question 6\", value=f\"{question_6}\" if question_6 else \"Not Set\", inline=False\n )\n e.add_field(\n name=\"Question 7\", value=f\"{question_7}\" if question_7 else \"Not Set\", inline=False\n )\n e.add_field(\n name=\"Question 8\", value=f\"{question_8}\" if question_8 else \"Not Set\", inline=False\n )\n e.add_field(\n name=\"Question 9\", value=f\"{question_9}\" 
if question_9 else \"Not Set\", inline=False\n )\n e.add_field(\n name=\"Question 10\", value=f\"{question_10}\" if question_10 else \"Not Set\", inline=False\n )\n e.add_field(\n name=\"Question 11\", value=f\"{question_11}\" if question_11 else \"Not Set\", inline=False\n )\n e.add_field(\n name=\"Question 12\", value=f\"{question_12}\" if question_12 else \"Not Set\", inline=False\n )\n e.add_field(\n name=\"Question 13\", value=f\"{question_13}\" if question_13 else \"Not Set\", inline=False\n )\n await ctx.send(embed=e)", "async def set_questions(self, ctx: commands.Context):\n\n def check(m):\n return m.author == ctx.author and m.channel == ctx.channel\n\n await ctx.send(\n \"Let's set up those questions we've not pre-filled:\\nYou will be setting questions 8-12. You can view the preloaded questions by passing `{}appq`. To begin, reply with `admin abuse` *spelled exact*\".format(\n ctx.prefix\n )\n )\n try:\n confirmation = await ctx.bot.wait_for(\"message\", check=check, timeout=20)\n if confirmation.content.lower() != \"admin abuse\":\n return await ctx.send(\"Alright, let's do these later then\")\n except asyncio.TimeoutError:\n return await ctx.send(\n \"Took to long to respond, gotta be smarter than the users you're hiring for sure.\"\n )\n\n app_questions = await self.config.guild(ctx.guild).app_questions.get_raw()\n question_8 = app_questions[\"question8\"]\n question_9 = app_questions[\"question9\"]\n question_10 = app_questions[\"question10\"]\n question_11 = app_questions[\"question11\"]\n question_12 = app_questions[\"question12\"]\n await ctx.send(\n \"Alright, let's start with question 8: You have 5min to decide and respond with question you'd like, or respond with cancel to do this later\"\n )\n\n if question_8 is not None:\n await ctx.send(\n f\"Looks like question 8 is currently `{question_8}`:\\n Do you want to change this? Type `no` to skip or the question you wish to change to if you want to change.\"\n )\n try:\n submit_8 = await ctx.bot.wait_for(\"message\", check=check, timeout=300)\n if submit_8.content.lower() != \"no\":\n if len(submit_8.content) > 750:\n return await ctx.send(\n \"Talkitive are we? Too many characters to fit in final embed, shorten the question some\"\n )\n await self.config.guild(ctx.guild).app_questions.question8.set(\n submit_8.content\n )\n except asyncio.TimeoutError:\n return await ctx.send(\n \"Took too long bud. Let's be coherent for this and try again.\"\n )\n\n if question_8 is None:\n try:\n submit_8 = await ctx.bot.wait_for(\"message\", check=check, timeout=300)\n if submit_8.content.lower() != \"cancel\":\n if len(submit_8.content) > 750:\n return await ctx.send(\n \"Talkitive are we? Too many characters to fit in final embed, shorten the question some\"\n )\n await self.config.guild(ctx.guild).app_questions.question8.set(\n submit_8.content\n )\n except asyncio.TimeoutError:\n return await ctx.send(\n \"Took too long bud. Let's be coherent for this and try again.\"\n )\n await ctx.send(\"Moving to question 9: Please respond with your next app question\")\n\n if question_9 is not None:\n await ctx.send(\n f\"Looks like question 9 is currently `{question_9}`:\\n Do you want to change this? Type `no` to skip or the question you wish to change to if you want to change.\"\n )\n try:\n submit_9 = await ctx.bot.wait_for(\"message\", check=check, timeout=300)\n if submit_9.content.lower() != \"no\":\n if len(submit_9.content) > 750:\n return await ctx.send(\n \"Talkitive are we? 
Too many characters to fit in final embed, shorten the question some\"\n )\n await self.config.guild(ctx.guild).app_questions.question9.set(\n submit_9.content\n )\n except asyncio.TimeoutError:\n return await ctx.send(\n \"Took too long bud. Let's be coherent for this and try again.\"\n )\n await ctx.send(\"Moving to question 10: Please respond with your next app question\")\n\n if question_9 is None:\n try:\n submit_9 = await ctx.bot.wait_for(\"message\", check=check, timeout=300)\n if submit_9.content.lower() != \"cancel\":\n if len(submit_9.content) > 750:\n return await ctx.send(\n \"Talkitive are we? Too many characters to fit in final embed, shorten the question some\"\n )\n await self.config.guild(ctx.guild).app_questions.question9.set(\n submit_9.content\n )\n except asyncio.TimeoutError:\n return await ctx.send(\n \"Took too long bud. Let's be coherent for this and try again.\"\n )\n await ctx.send(\"Moving to question 10: Please respond with your next app question\")\n\n if question_10 is not None:\n await ctx.send(\n f\"Looks like question 10 is currently `{question_10}`:\\n Do you want to change this? Type `no` to skip or the question you wish to change to if you want to change.\"\n )\n try:\n submit_10 = await ctx.bot.wait_for(\"message\", check=check, timeout=300)\n if submit_10.content.lower() != \"no\":\n if len(submit_10.content) > 750:\n return await ctx.send(\n \"Talkitive are we? Too many characters to fit in final embed, shorten the question some\"\n )\n await self.config.guild(ctx.guild).app_questions.question10.set(\n submit_10.content\n )\n except asyncio.TimeoutError:\n return await ctx.send(\n \"Took too long bud. Let's be coherent for this and try again.\"\n )\n await ctx.send(\"Moving to question 11: Please respond with your next app question\")\n\n if question_10 is None:\n try:\n submit_10 = await ctx.bot.wait_for(\"message\", check=check, timeout=300)\n if submit_10.content.lower() != \"cancel\":\n if len(submit_10.content) > 750:\n return await ctx.send(\n \"Talkitive are we? Too many characters to fit in final embed, shorten the question some\"\n )\n await self.config.guild(ctx.guild).app_questions.question10.set(\n submit_10.content\n )\n except asyncio.TimeoutError:\n return await ctx.send(\n \"Took too long bud. Let's be coherent for this and try again.\"\n )\n await ctx.send(\"Moving to question 11: Please respond with your next app question\")\n\n if question_11 is not None:\n await ctx.send(\n f\"Looks like question 11 is currently `{question_11}`:\\n Do you want to change this? Type `no` to skip or the question you wish to change to if you want to change.\"\n )\n try:\n submit_11 = await ctx.bot.wait_for(\"message\", check=check, timeout=300)\n if submit_11.content.lower() != \"no\":\n if len(submit_11.content) > 750:\n return await ctx.send(\n \"Talkitive are we? Too many characters to fit in final embed, shorten the question some\"\n )\n await self.config.guild(ctx.guild).app_questions.question11.set(\n submit_11.content\n )\n except asyncio.TimeoutError:\n return await ctx.send(\n \"Took too long bud. Let's be coherent for this and try again.\"\n )\n await ctx.send(\"Moving to question 12: Please respond with your next app question\")\n\n if question_11 is None:\n try:\n submit_11 = await ctx.bot.wait_for(\"message\", check=check, timeout=300)\n if submit_11.content.lower() != \"cancel\":\n if len(submit_11.content) > 750:\n return await ctx.send(\n \"Talkitive are we? 
Too many characters to fit in final embed, shorten the question some\"\n )\n await self.config.guild(ctx.guild).app_questions.question11.set(\n submit_11.content\n )\n except asyncio.TimeoutError:\n return await ctx.send(\n \"Took too long bud. Let's be coherent for this and try again.\"\n )\n await ctx.send(\"Moving to question 12: Please respond with your next app question\")\n\n if question_12 is not None:\n await ctx.send(\n f\"Looks like question 12 is currently `{question_12}`:\\n Do you want to change this? Type `no` to skip or the question you wish to change to if you want to change.\"\n )\n try:\n submit_12 = await ctx.bot.wait_for(\"message\", check=check, timeout=300)\n if submit_12.content.lower() != \"no\":\n if len(submit_12.content) > 750:\n return await ctx.send(\n \"Talkitive are we? Too many characters to fit in final embed, shorten the question some\"\n )\n await self.config.guild(ctx.guild).app_questions.question12.set(\n submit_12.content\n )\n except asyncio.TimeoutError:\n return await ctx.send(\n \"Took too long bud. Let's be coherent for this and try again.\"\n )\n\n if question_12 is None:\n try:\n submit_12 = await ctx.bot.wait_for(\"message\", check=check, timeout=300)\n if submit_12.content.lower() != \"cancel\":\n if len(submit_12.content) > 750:\n return await ctx.send(\n \"Talkitive are we? Too many characters to fit in final embed, shorten the question some\"\n )\n await self.config.guild(ctx.guild).app_questions.question12.set(\n submit_12.content\n )\n except asyncio.TimeoutError:\n return await ctx.send(\n \"Took too long bud. Let's be coherent for this and try again.\"\n )\n\n await ctx.send(\n \"That's all the questions and your apps are set *maybe, if you answered, anyway*. Check this with `{}appq`\".format(\n ctx.prefix\n )\n )", "def next_question(update: Update, context: CallbackContext) -> None:\n update.callback_query.answer()\n if not isinstance(context.chat_data, dict):\n raise AssertionError\n if context.chat_data['question_number'] < (\n len(context.chat_data['qlist']) - 1):\n context.chat_data['question_number'] += 1\n context.chat_data['question_attempted_by'] = []\n msg_text, option_keyboard = Quiz.parse_question(\n context.chat_data['qlist'][\n context.chat_data['question_number']])\n option_keyboard.append([\n InlineKeyboardButton(\"Next (Admin Only)\", callback_data=\"next\")\n ])\n context.chat_data['message'] = context.bot.edit_message_text(\n text=msg_text,\n chat_id=context.chat_data['message'].chat.id,\n message_id=context.chat_data['message'].message_id,\n reply_markup=InlineKeyboardMarkup(option_keyboard),\n parse_mode=ParseMode.MARKDOWN)\n else:\n Quiz.send_scoreboard(context=context)", "def insert_question_assignmentype(request, pk, cd):\n prof = request.user.prof\n assignmentype = Assignmentype.objects.filter(id=pk, prof=prof).first()\n cd = int(cd)\n if cd == 1:\n classForm = AddQuestionForm\n info = 'Add'\n elif cd == -1:\n classForm = RemoveQuestionForm\n info = 'Remove'\n if assignmentype:\n if request.method == 'POST':\n form = classForm(request.POST,\n nb_questions=assignmentype.nb_questions)\n if form.is_valid():\n question = form.cleaned_data['question']\n # Modify attribute question of all associated evalquestion\n if cd == -1:\n evalquestions = Evalquestion.objects.filter(\n evalassignment__assignment__assignmentype=assignmentype,\n question=question)\n evalquestions.delete()\n evalquestions = Evalquestion.objects.filter(\n evalassignment__assignment__assignmentype=assignmentype,\n question__gte=question)\n 
evalquestions.update(question=F('question') + cd)\n # Create a new evalquestion for each evalassignment (if cd=1)\n # and inform that it has to be graded\n for evalassignment in Evalassignment.objects.filter(\n assignment__assignmentype=assignmentype):\n if cd == 1:\n Evalquestion.objects.create(\n evalassignment=evalassignment, question=question)\n evalassignment.reset_grade()\n elif cd == -1:\n evalassignment.grade_assignment = None\n evalassignment.save()\n # Add a question to the assignmentype\n assignmentype.nb_questions += cd\n if cd == 1:\n if assignmentype.questions_coeff:\n assignmentype.questions_coeff.insert(question - 1, None)\n if assignmentype.questions_statement:\n assignmentype.questions_statement.insert(question - 1,\n None)\n assignmentype.save()\n elif cd == -1:\n if assignmentype.questions_coeff:\n del assignmentype.questions_coeff[question - 1]\n if assignmentype.questions_statement:\n del assignmentype.questions_statement[question - 1]\n assignmentype.save()\n log = tasks.compute_grades_assignmentype(assignmentype.pk)\n logger.info(log)\n return redirect('/detail_assignmentype/%s/' % assignmentype.pk)\n form = classForm(nb_questions=assignmentype.nb_questions)\n context = {'assignmentype': assignmentype, 'form': form, 'info': info,\n 'cd': cd}\n return render(request, 'gradapp/insert_question.html', context)\n else:\n return redirect('gradapp:index')", "def test_incomplete_form(self):\n page = self.get_assert_200(self.url, user=self.voting_user1.username)\n form = page.forms[\"student-vote-form\"]\n self.fill_form(form, fill_complete=False)\n response = form.submit()\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"vote for all rating questions\", response)\n\n form = page.forms[\"student-vote-form\"]\n self.assertEqual(form[question_id(self.course.general_contribution, self.general_questionnaire, self.general_text_question)].value, \"some text\")\n self.assertEqual(form[question_id(self.course.general_contribution, self.general_questionnaire, self.general_likert_question)].value, \"1\")\n self.assertEqual(form[question_id(self.course.general_contribution, self.general_questionnaire, self.general_grade_question)].value, \"3\")\n\n self.assertEqual(form[question_id(self.contribution1, self.contributor_questionnaire, self.contributor_text_question)].value, \"some other text\")\n self.assertEqual(form[question_id(self.contribution1, self.contributor_questionnaire, self.contributor_likert_question)].value, \"4\")\n\n self.assertEqual(form[question_id(self.contribution2, self.contributor_questionnaire, self.contributor_text_question)].value, \"some more text\")" ]
[ "0.6444939", "0.6333186", "0.6309494", "0.61862105", "0.6130509", "0.611069", "0.60459816", "0.601469", "0.5895587", "0.5894713", "0.58369786", "0.5770874", "0.5741646", "0.56520283", "0.5640494", "0.5598499", "0.5576267", "0.55752164", "0.5573383", "0.55517626", "0.55438817", "0.55430025", "0.55341905", "0.5517238", "0.551277", "0.54966307", "0.5490304", "0.5456637", "0.54409593", "0.5397238" ]
0.68466634
0
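To make the first record's positive document concrete, here is a minimal, self-contained sketch of the same pattern: look up one MongoDB record by id and render it in an admin template. The app setup, connection URI, route, and database names are illustrative assumptions, not part of the dataset row.

# Minimal sketch of the fetch-and-render pattern above; all setup here
# (Flask app, Mongo URI, route, secret key) is assumed for illustration.
from datetime import date

from flask import Flask, render_template, session
from pymongo import MongoClient

app = Flask(__name__)
app.secret_key = "change-me"  # needed so the session object is usable

mongo_database = MongoClient("mongodb://localhost:27017")["quiz_app"]

@app.route("/admin/cards/<card_id>/update")
def admin_card_update(card_id):
    # Fetch the question card by its application-level "id" field
    card = mongo_database["questions"].find_one({"id": card_id})
    # Renders templates/admin_card_update.html, which must exist in the project
    return render_template(
        "admin_card_update.html",
        card=card,
        datetime=date.today().strftime("%x"),
        admin_logged=session.get("logged_in"),
        admin_session=session,
    )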
Generate the closest possible SEO-friendly path for this campaign. Note that these paths are only generated for campaigns that are already published.
def generate_seo_friendly_path(self, base_pathname_string='', campaignx_we_vote_id='', campaignx_title=None):
    from politician.controllers_generate_seo_friendly_path import generate_seo_friendly_path_generic
    return generate_seo_friendly_path_generic(
        base_pathname_string=base_pathname_string,
        for_campaign=True,
        for_politician=False,
        campaignx_title=campaignx_title,
        campaignx_we_vote_id=campaignx_we_vote_id,
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_url(self):\n u = urlparse.urljoin(settings.SITE_URL, '/#/')\n\n m = self.object.__class__.__name__\n\n if m == 'Workspace':\n return urlparse.urljoin(\n u, 'workspaces/w/{}'.format(self.object.slug)\n )\n elif m == 'Vault':\n return urlparse.urljoin(\n u, 'workspaces/w/{}/vaults/v/{}'.format(\n self.object.workspace.slug, self.object.slug))\n elif m == 'Card':\n return urlparse.urljoin(\n u, '/workspaces/w/{}/vaults/v/{}/cards/c/{}'.format(\n self.object.vault.workspace.slug, self.object.vault.slug,\n self.object.slug))\n\n return None", "def generate_path(self):\n ontology = []\n for item in self.parent.get_ancestors():\n if item.level != 0:\n ontology.append(item.slug)\n\n if self.parent.level != 0:\n ontology.append(self.parent.slug)\n\n ontology.append(self.slug)\n\n return '/' + '/'.join(ontology) + '/'", "def get_short_path(content):", "def buildpath(self):\n basepath = urlutil.href_settings.root + (self.relpath if self.relpath else cherrypy.request.path_info)\n if basepath.find('~') < 0:\n basepath += ('' if basepath.endswith('/') else '/') + '~'\n if cherrypy.request.query_string:\n basepath += ('&' if basepath.find('?') >= 0 else '?') + cherrypy.request.query_string\n return basepath", "def full_url(self):\r\n\r\n url = '/' + '/'.join(p.slug for p in list(self.get_ancestors()) + [self] if p.slug)\r\n\r\n # Make sure the URL ends with a slash, as god intended.\r\n # This little endswith dance is done to handle the root url ('/') correctly.\r\n if not url.endswith('/'):\r\n url = url + '/'\r\n\r\n return url", "def _path(self):\n path = REQUIRES['static_url']\n\n # add paths as specified\n for prefix, subpath in self.getPrefixDict().items():\n if ( self.filename.startswith(prefix) ):\n path += subpath\n break;\n\n return path", "def get_full_path(self):\n return self.path_display", "def generate_url(self, campaign_id):\n pass", "def get_absolute_url(self):\n\n url = reverse('comicsite.views.site', args=[self.short_name])\n return url", "def abs_path(self) -> str:\n full_path = '/'.join(folder.name for folder in reversed(self.ancestors))\n return f'/{full_path}/'", "def url_for(self, path):\n if self.in_canvas:\n return self.get_app_url(path[1:])\n else:\n return '%s%s' % (settings.SITE_URL, path)", "def shorten_url():\n return rh.shorten_url(request)", "def constructShortestPath(self):", "def findShortestPath(self):\r\n pass", "def serving_path(self):\n if self.source_pod_path != self.pod_path or not self.use_fallback:\n path = self.pod.path_format.format_static(\n self.path_format, locale=self.locale,\n fingerprint=self.fingerprint)\n else:\n # Fall back to use the default locale for the formatted path.\n path = self.pod.path_format.format_static(\n self.base_path_format, locale=self.pod.podspec.default_locale,\n fingerprint=self.fingerprint)\n\n if not self.fingerprinted:\n return path\n\n base, ext = os.path.splitext(path)\n # Special case to preserve \".min.<ext>\" extensions.\n if base.endswith('.min'):\n base = base[:-4]\n return '{}-{}.min{}'.format(base, self.fingerprint, ext)\n return '{}-{}{}'.format(base, self.fingerprint, ext)", "def fake_full_path(self) -> PurePath:\n if self.category:\n # Giza wrote out yaml file artifacts under a directory. e.g. 
steps-foo.yaml becomes\n # steps/foo.rst\n return self.source_path.parent.joinpath(\n PurePath(self.category), self.output_filename\n )\n return self.source_path", "def __url(self, object):\n return '/'.join(object.getPhysicalPath())", "def get_short_url_base():", "def generate_path(url, output_path='', site_root=''):\n path = [] if not site_root else [site_root.replace('/', '')]\n for item in url.split('/'):\n if item:\n path.append(item)\n if '.' not in path[-1] and path[-1].split('.'):\n path.append('index.html')\n return os.path.join(output_path, *path)", "def path_for(self, url, pagename):\n parts = pagename.split('/')[:-1]\n if len(parts) == 0:\n return url[1:]\n return os.path.relpath(url, '/%s' % '/'.join(parts))", "def path(self):\n if self.parent and self.parent.category_id:\n return self.parent.path + '/' + self.basename\n return self.basename", "def path_to_related(self, path):\n # self.path = \"...functional/fixtures/img/logo.png\"\n # path = \"...functional/fixtures/docs/index.md\"\n current = self.dir\n\n while not path.startswith(current.dir.path):\n current = current.dir.parent.dir\n\n remaining = current.relative(self.path)\n\n level = current.relative(path).count(os.sep)\n\n way_back = os.sep.join(['..'] * level) or '.'\n result = \"{0}/{1}\".format(way_back, remaining)\n\n return result", "def _make_path(self) -> str:\r\n path_ = Path(path.join(conf.instance.output_path, self.path_prefix, self.name))\r\n if self.is_identifier_in_paths:\r\n path_ = path_ / self.identifier\r\n return path_", "def path(self):\n return '/%s' % (self.full_name)", "def url(self):\n if self.term_type != 'C':\n url_fmt = self.path_level_url_fmt\n url_info = {'id': self.term_type}\n else:\n url_fmt = self.obj_level_url_fmt\n url_info = {'org_prefix': self.org_prefix, 'id': self.term_id}\n\n return url_fmt % url_info", "def get_url(self):\n if self.object_id is None:\n return '{0}/{1}'.format(self.parent.get_url(), self.path)\n\n return '{0}/{1}/{2}'.format(self.parent.get_url(), self.path,\n self.object_id.replace('/', '-'))", "def build_url(self):\n url = requests.utils.requote_uri(\n self.torrent_page + self.string_search)\n if self.page == '1337x':\n return(url + '/1/')\n elif self.page == 'limetorrents':\n return(url + '/')\n else:\n return(url)", "def __str__(self):\n url = '{}/{}'.format(self.root, self.path)\n return url", "def get_custom_short_paths(content):", "def path(self):\n\t\treturn os.path.join(*self._string_values(limit=4))" ]
[ "0.64412355", "0.6416239", "0.622437", "0.5910468", "0.589471", "0.58929414", "0.58899176", "0.58650005", "0.5810434", "0.578214", "0.57725555", "0.5756489", "0.5720189", "0.5715019", "0.5710717", "0.57071894", "0.56893915", "0.5652677", "0.5633637", "0.56251574", "0.56203246", "0.56098557", "0.5572343", "0.5568986", "0.55671847", "0.55572134", "0.55553764", "0.5523693", "0.5500934", "0.54973066" ]
0.67637473
0
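The positive document for the second record delegates all the work to generate_seo_friendly_path_generic, which is not included in the row. As a rough illustration of what building an SEO-friendly path from a campaign title involves, here is a generic slugification sketch; it is an assumption for illustration, not the project's actual logic.

import re
import unicodedata

def seo_friendly_path(title: str, max_length: int = 60) -> str:
    # Drop accents, lowercase, and collapse non-alphanumeric runs into hyphens
    ascii_title = unicodedata.normalize("NFKD", title).encode("ascii", "ignore").decode()
    slug = re.sub(r"[^a-z0-9]+", "-", ascii_title.lower()).strip("-")
    return slug[:max_length].rstrip("-")

print(seo_friendly_path("Save the Río Grande 2024!"))  # save-the-rio-grande-2024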
This function calculates the information gain, where ig(f1, f2) = H(f1) - H(f1|f2)
def information_gain(f1, f2):
    ig = ee.entropyd(f1) - conditional_entropy(f1, f2)
    return ig
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def su_calculation(f1, f2):\n # calculate information gain of f1 and f2, t1 = ig(f1, f2)\n t1 = information_gain(f1, f2)\n # calculate entropy of f1\n t2 = ee.entropyd(f1)\n # calculate entropy of f2\n t3 = ee.entropyd(f2)\n\n su = 2.0 * t1 / (t2 + t3)\n\n return su", "def info_gain(left, right, current_uncertainty):\n p = float(len(left)) / (len(left) + len(right))\n return current_uncertainty - p * gini(left) - (1 - p) * gini(right)", "def self_energy(gf_imp0, gf_imp):\n return 1/gf_imp0 - 1/gf_imp", "def _information_gain(self, y, X_column, split_thersh):\n # parent E\n parent_entropy = entropy(y)\n # generate split\n left_idxs, right_idxs = self._split(X_column, split_thersh)\n\n if len(left_idxs) == 0 or len(right_idxs) == 0:\n return 0\n # weighted avg child E\n n = len(y)\n n_left_samples, n_right_samples = len(left_idxs), len(right_idxs)\n entropy_left, entropy_right = entropy(y[left_idxs]), entropy(y[right_idxs])\n child_entropy = (n_left_samples/n) * entropy_left + (n_right_samples/n) * entropy_right\n\n # return IG\n ig = parent_entropy - child_entropy\n return ig", "def informationGain2(data, attribute):\n \n split_data = splitBy(data, attribute) \n weighted_entropies = 0\n \n for set in split_data:\n weighted_entropies += len(set) / len(data) * entropy2(set) \n \n columnIG = entropy2(data) - weighted_entropies\n \n return columnIG", "def compute_information_gain(Y, xi):\r\n H_Y = H(Y)\r\n\r\n TrainSet = np.delete(AllSets[2], -1, axis=1)\r\n ColumnInd = AllSets[3].index(xi) # extract from dictionary\r\n\r\n NumHeadlines = AllSets[2].shape[0]\r\n AllOccurences, Count = np.unique(AllSets[2][:, ColumnInd], return_counts=True)\r\n\r\n TotalH_YGivenX = 0\r\n for i, count in zip(AllOccurences, Count):\r\n NewY = Y[TrainSet[:, ColumnInd] == i]\r\n\r\n TotalH_YGivenX += H(NewY) * float(count) / NumHeadlines\r\n\r\n return H_Y - TotalH_YGivenX", "def calc_fffb_inhibition(self) -> None:\n # Feedforward inhibition\n ffi = self.spec.ff * max(self.avg_net - self.spec.ff0, 0)\n # Feedback inhibition\n self.fbi = self.spec.fb_dt * (self.spec.fb * self.avg_act - self.fbi)\n # Global inhibition\n self.gc_i = self.spec.gi * (ffi * self.fbi)", "def _information_gain(self, feature, node):\n return node.entropy() - self._entropy(feature, node)", "def info_gain(self, left, right, current_uncertainty):\n p = float(len(left)) / (len(left) + len(right))\n return current_uncertainty - p * self.gini(left) - (1 - p) * self.gini(right)", "def ig(X, y):\n\n # binarization: from counts to presence/abscence\n binarize(X, threshold=0.0, copy=False)\n\n # una columna por cada clase\n Y = LabelBinarizer().fit_transform(y)\n if Y.shape[1] == 1: # binary problem case\n Y = np.append(1-Y, Y, axis=1)\n\n Y_prob = (np.sum(Y, axis=0, dtype=np.float64) / len(Y)).reshape(-1, 1)\n\n # calculate the class entropy H(Y)\n class_entropy = _entropy(Y_prob)\n\n X_y_count = safe_sparse_dot(Y.T, X)\n # TODO XXX FIXME ver si estoy calculando bien esta probabilidad\n X_y_prob = \\\n X_y_count / np.sum(X_y_count, axis=0, dtype=np.float64)\n\n # calculate the conditional entropy of the class given the feature H(y|f_i)\n cond_entropy = _entropy(X_y_prob) # TODO XXX FIXME ver si estoy calculando bien la entropia condicional\n print \"class:\", class_entropy\n print \"cond_entropy:\", cond_entropy\n\n infogain = class_entropy - cond_entropy\n\n return infogain, None", "def return_infogain(instances, labels):\n # some initial calculations\n infogain = dict.fromkeys(range(instances.shape[1]), 0)\n cnt = Counts(instances, labels)\n 
len_instances = instances.shape[0]\n feature_frequency = cnt.count_document_frequency()\n label_frequency = cnt.count_label_frequency()\n label_feature_frequency = cnt.count_label_feature_frequency()\n label_probability = [(label_frequency[label] / len_instances) for label in label_frequency.keys()]\n initial_entropy = -sum([prob * math.log(prob, 2) for prob in label_probability if prob != 0])\n # assign infogain values to each feature\n for feature in feature_frequency.keys():\n # calculate positive entropy\n frequency = feature_frequency[feature]\n if frequency > 0:\n feature_probability = frequency / len_instances\n positive_label_probabilities = []\n for label in labels:\n if label_feature_frequency[label][feature] > 0:\n positive_label_probabilities.append(label_feature_frequency[label][feature] / frequency)\n else:\n positive_label_probabilities.append(0)\n positive_entropy = -sum([prob * math.log(prob, 2) for prob in positive_label_probabilities if prob != 0])\n else:\n positive_entropy = 0\n # calculate negative entropy\n inverse_frequency = len_instances - feature_frequency[feature]\n negative_probability = inverse_frequency / len_instances\n negative_label_probabilities = [((label_frequency[label] - label_feature_frequency[label][feature]) / inverse_frequency) for label in labels]\n negative_entropy = -sum([prob * math.log(prob, 2) for prob in negative_label_probabilities if prob != 0])\n # based on positive and negative entropy, calculate final entropy\n final_entropy = positive_entropy - negative_entropy\n infogain[feature] = initial_entropy - final_entropy\n return infogain", "def _calculate_information_gain(self, cur_state, next_state):\n\n n = len(cur_state)\n information_gain_per_action = np.zeros((n, self.action_dim))\n\n prob_cur = self.classifier.get_class1_prob(obs=cur_state)\n prob_next = self.classifier.get_class1_prob(obs=next_state)\n information_gain_true = (prob_next - prob_cur).reshape(-1, 1)\n\n next_state_null = np.copy(next_state)\n next_state_null[:, -self.action_dim:] = self.classifier.missing_value\n prob_next_null = self.classifier.get_class1_prob(next_state_null)\n\n for i in range(self.action_dim):\n next_state_i = np.copy(next_state)\n next_state_i[:, -self.action_dim:] = self.classifier.missing_value\n next_state_i[:, -i - 1] = next_state[:, -i - 1]\n\n prob_next_i = self.classifier.get_class1_prob(obs=next_state_i)\n information_gain_per_action[:, -i - 1] = prob_next_i - prob_next_null\n\n information_gain_sum = np.sum(information_gain_per_action, axis=1, keepdims=True)\n ratio = information_gain_true / information_gain_sum\n ratio[information_gain_sum == 0] = 0\n information_gain_per_action = information_gain_per_action * ratio\n return information_gain_per_action", "def _calculate_information_gain(self, cur_state, next_state, next_label):\n n = len(cur_state)\n information_gain_per_action = np.zeros((n, self.action_dim))\n prob_prev = self.classifier.get_class1_prob(obs=cur_state)\n\n for i in range(self.action_dim):\n obs_i = np.copy(next_state)\n obs_i[:, -self.action_dim:] = cur_state[:, -self.action_dim:]\n obs_i[:, - i - 1] = next_state[:, -i - 1]\n\n prob_i = self.classifier.get_class1_prob(obs=obs_i)\n class_1_gain = (prob_i - prob_prev) * next_label[:, 0]\n class_0_gain = (prob_i - prob_prev) * (1 - next_label)[:, 0]\n\n if self.positive_only:\n class_1_gain[class_1_gain < 0] = 0\n class_0_gain[class_0_gain < 0] = 0\n else:\n class_0_gain = - class_0_gain\n\n information_gain_per_action[:, - i - 1] = (class_1_gain + class_0_gain)\n\n return 
information_gain_per_action", "def get_info_gain(true_rows, false_rows, current_impurity):\n avg_impurity = (len(true_rows)/(len(true_rows)+len(false_rows))) * get_gini(true_rows) + \\\n (len(false_rows)/(len(true_rows)+len(false_rows))) * get_gini(false_rows)\n return current_impurity - avg_impurity", "def _cal_igr(x, y):\n return (_cal_entropy(y) - _cal_conditionalEnt(x, y)) / _cal_conditionalEnt(x, y)", "def calc_information_gain(data, split_name, target_name):\r\n # Calculate the original entropy\r\n original_entropy = calc_entropy(data[target_name])\r\n \r\n # Find the median of the column we're splitting\r\n column = data[split_name]\r\n median = column.median()\r\n \r\n # Make two subsets of the data, based on the median\r\n left_split = data[column <= median]\r\n right_split = data[column > median]\r\n \r\n # Loop through the splits and calculate the subset entropies\r\n to_subtract = 0\r\n for subset in [left_split, right_split]:\r\n prob = (subset.shape[0] / data.shape[0]) \r\n to_subtract += prob * calc_entropy(subset[target_name])\r\n \r\n # Return information gain\r\n return original_entropy - to_subtract", "def info_gain_ratio(Ex, a, nan=True):\n # Check whether examples and attributes have the same lengths.\n if len(Ex) != len(a):\n raise ValueError(\"Ex and a must be of the same size.\")\n\n # Compute information gain ratio as IG/IV\n return info_gain(Ex, a, nan) / intrinsic_value(Ex, a, nan)", "def calc_information_gain(data, split_name, target_name):\n # Calculate the original entropy\n original_entropy = calc_entropy(data[target_name])\n \n # Find the median of the column we're splitting\n column = data[split_name]\n median = column.median()\n \n # Make two subsets of the data, based on the median\n left_split = data[column <= median]\n right_split = data[column > median]\n \n # Loop through the splits and calculate the subset entropies\n to_subtract = 0\n for subset in [left_split, right_split]:\n prob = (subset.shape[0] / data.shape[0]) \n to_subtract += prob * calc_entropy(subset[target_name])\n \n # Return information gain\n return original_entropy - to_subtract", "def _calculate_information_gain(self, obs, label):\n n = len(obs)\n information_gain_per_action = np.zeros((n, self.action_dim))\n\n obs_null = np.copy(obs)\n obs_null[:, -self.action_dim:] = self.classifier.missing_value\n prob_null = self.classifier.get_class1_prob(obs=obs_null)\n\n for i in range(self.action_dim):\n obs_i = np.copy(obs)\n for j in range(self.action_dim):\n if i != j:\n obs_i[:, - j - 1] = self.classifier.missing_value\n prob_i = self.classifier.get_class1_prob(obs=obs_i)\n class_1_gain = (prob_i - prob_null) * label[:, 0]\n class_0_gain = (prob_i - prob_null) * (1 - label)[:, 0]\n\n if self.positive_only:\n class_1_gain[class_1_gain < 0] = 0\n class_0_gain[class_0_gain < 0] = 0\n else:\n class_0_gain = - class_0_gain\n\n information_gain_per_action[:, - i - 1] = (class_1_gain + class_0_gain)\n\n return information_gain_per_action", "def information_gain(df, var_list, label_col='label'):\n\n df = df.select(var_list + [label_col])\n\n df.cache()\n\n print \"[Info] Information gain - Cached DF for the computation of IG - Size: \" + str(df.count())\n\n Ht = single_entropy(df=df, var=label_col)\n\n print \"[Info] Information gain - Initial value of entropy: \" + str(Ht)\n\n ig_results = [(v, Ht - conditional_entropy(df=df, var=v, var_t=label_col)) for v in var_list]\n\n for ig in ig_results:\n print \"[Info] IG for variable \" + ig[0] + \": \" + str(ig[1])\n\n result_df = 
spark.createDataFrame(ig_results, ['feature', 'ig']).withColumn('init_entropy', lit(Ht))\n\n return result_df", "def ift2(G, df):\n\n N = len(G)\n g = ifftshift(ifft2(ifftshift(G))) * (N * df)**2\n\n return g", "def totalInfilHorton2time(f0, fc, k, t1, t2):\n fraction = (f0 - fc)/(-k)\n Ft = (fc*t2) - (fc*t1) + (fraction*(np.exp(-k*t2) - np.exp(-k*t1)))\n return Ft", "def _calculate_information_gain(self, obs, label):\n n = len(obs)\n information_gain_per_action = np.zeros((n, self.action_dim))\n\n obs_null = np.copy(obs)\n obs_null[:, -self.action_dim:] = self.classifier.missing_value\n ce_loss_null = self.classifier.calculate_ce_loss(obs=obs_null, label=label)\n\n for i in range(self.action_dim):\n obs_i = np.copy(obs)\n for j in range(self.action_dim):\n if i != j:\n obs_i[:, - j - 1] = self.classifier.missing_value\n ce_loss_i = self.classifier.calculate_ce_loss(obs=obs_i, label=label)\n\n information_gain_per_action[:, - i - 1] = (ce_loss_null - ce_loss_i)[:, 0]\n\n return information_gain_per_action", "def _information_gain(self, y, subsets):\n n = y.shape[0]\n child_entropy = 0\n\n for y_i in subsets:\n child_entropy += self._entropy(y_i) * y_i.shape[0] / float(n)\n\n return self._entropy(y) - child_entropy", "def gain(Y, X):\n return entropy(Y) - cEntropy(Y,X)", "def symdif(f1, f2):\n\n y = union(subm(f1,f2),subm(f2,f1))\n return y", "def calc_kwta_inhibition(self) -> None:\n top_m_units = self.units.top_k_net_indices(self.spec.k + 1)\n g_i_thr_m = self.units.g_i_thr(top_m_units[-1])\n g_i_thr_k = self.units.g_i_thr(top_m_units[-2])\n self.gc_i = g_i_thr_m + 0.5 * (g_i_thr_k - g_i_thr_m)", "def get_information_gain(self, word, documents):\n gain = self.get_entropy(documents)\n with_word, without_word = self.get_split_data(word, documents)\n gain -= self.get_entropy(with_word) * len(with_word) / len(documents)\n gain -= self.get_entropy(without_word) * len(without_word) / len(documents)\n return gain", "def calculate_BIC(self): \n hmm_ll_calculator = LikelihoodInfEngineHMM(\n dbn=self.model.dbn, hidden_node_index=0, check_dbn=False)\n ll_full = hmm_ll_calculator.calc_ll(self.seq_list, self.mismask_list) \n return 2 * ll_full - self._get_parameter_count() * math.log(\n self._get_observation_count())", "def __call__(self, f1, f2):\n r = len(set(f1.features) ^ set(f2.features))\n\n return exp(-self.gamma * r)" ]
[ "0.6145874", "0.6053615", "0.58665586", "0.58400637", "0.5828656", "0.5826964", "0.58186895", "0.5816665", "0.579917", "0.5762117", "0.5662728", "0.5646631", "0.563621", "0.55903405", "0.55712414", "0.5564625", "0.5501128", "0.5493852", "0.547589", "0.54473156", "0.54442096", "0.5419394", "0.5409516", "0.5408071", "0.5398654", "0.53926736", "0.5350368", "0.5341584", "0.5338852", "0.5321852" ]
0.7910877
0
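The record closed above pairs a query about per-feature information gain with code that works from document and label frequency counts. As a minimal, dependency-free sketch of the same idea for a single binary presence/absence feature — the helper names below are hypothetical stand-ins, not the `cnt.count_*` counters the record calls:

```python
import math
from collections import Counter

def entropy(labels):
    """Shannon entropy (base 2) of a discrete label sequence."""
    if not labels:
        return 0.0
    n = len(labels)
    return -sum((c / n) * math.log2(c / n) for c in Counter(labels).values())

def information_gain(feature_present, labels):
    """IG of a binary feature: H(labels) - H(labels | feature)."""
    n = len(labels)
    pos = [lab for f, lab in zip(feature_present, labels) if f]
    neg = [lab for f, lab in zip(feature_present, labels) if not f]
    h_cond = (len(pos) / n) * entropy(pos) + (len(neg) / n) * entropy(neg)
    return entropy(labels) - h_cond

# A feature that perfectly separates the labels recovers the full entropy.
print(information_gain([1, 1, 0, 0], ["a", "a", "b", "b"]))  # 1.0
```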
This function calculates the conditional entropy, where ce = H(f1) - I(f1;f2)
def conditional_entropy(f1, f2): ce = ee.entropyd(f1) - ee.midd(f1, f2) return ce
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def conditional_entropy_hyper(self) -> float:\n pass", "def conditional_entropy(self) -> float:\n pass", "def information_gain(f1, f2):\n\n ig = ee.entropyd(f1) - conditional_entropy(f1, f2)\n return ig", "def _conditional_entropy_compute(confmat: Tensor) ->Tensor:\n confmat = _drop_empty_rows_and_cols(confmat)\n total_occurrences = confmat.sum()\n p_xy_m = confmat / total_occurrences\n p_y = confmat.sum(1) / total_occurrences\n p_y_m = p_y.unsqueeze(1).repeat(1, p_xy_m.shape[1])\n return torch.nansum(p_xy_m * torch.log(p_y_m / p_xy_m))", "def cEntropy(Y, X):\n return jEntropy(Y, X) - entropy(X)", "def entropy_function(c, n):\n return -(c*1.0/n)*math.log(c*1.0/n,2)", "def entropy_coefficient(filter1, filter2, base=2):\n\n if (type(filter1) is NullField) or (type(filter2) is NullField):\n return 0\n\n total_count = int(filter1.bit_size)\n\n f1_element_count = filter1.filter.count(True)\n f2_element_count = filter2.filter.count(True)\n\n prob_f1 = f1_element_count / total_count\n prob_f2 = f1_element_count / total_count\n\n e_f1 = -1.0 * total_count * prob_f1 * math.log(prob_f1) / math.log(base)\n e_f2 = -1.0 * total_count * prob_f2 * math.log(prob_f2) / math.log(base)\n\n entropy = abs(e_f1 - e_f2)\n\n # for element_count in Counter(data).values():\n # p = element_count / total_count\n # entropy -= p * math.log(p, self.base)\n\n assert entropy >= 0\n\n return 1 - entropy", "def entropy(self, f):\n f_log = -torch.log(self.einsum(\"q,q->q\", [f, 1 / self.w]))\n return self.einsum(\"q,q->\", [f, f_log])", "def cond_entropy(joint_prob, cond_prob):\n # Computing log2(P cond)\n log2_p = (np.ma.log2(cond_prob)).filled(0)\n # Multipling element wise the arrays\n prod_entropy = np.multiply(joint_prob, log2_p)\n # Getting the - sum of the resulting array.\n H = -( np.sum(prod_entropy))\n return H", "def entropy(x):\n nz = np.nonzero(x)[0]\n return -np.sum(x[nz]*np.log2(x[nz]))", "def entropy(a, b):\n with mp.extradps(5):\n a, b = _validate_a_b(a, b)\n return (_fun.logbeta(a, b)\n - (a - 1)*mp.psi(0, a)\n - (b - 1)*mp.psi(0, b)\n + (a + b - 2)*mp.psi(0, a + b))", "def _entropy_filter(self, prob1, prob2):\n\n\n # calculate merged prob.\n prob_merged = (prob1 + prob2)/2\n # Compute entropy for each prob.\n H1 = -prob1 * math.log(prob1) - (1-prob1) * math.log(1-prob1)\n H2 = -prob2 * math.log(prob2) - (1-prob2) * math.log(1-prob2)\n Hm = -prob_merged * math.log(prob_merged) - (1-prob_merged) * math.log(1-prob_merged)\n\n H_min = min(H1, H2, Hm)\n\n if H_min == H1:\n return prob1\n elif H_min == H2:\n return prob2\n else:\n return prob_merged", "def entropy(self):\n Z = self.sum()\n assert (Z > 0), 'Non-normalizable factor (perhaps log factor?)' # also check for positivity?\n H = 0.0\n for x in np.nditer(self.t, op_flags=['readonly']):\n p = x/Z\n H += 0.0 if p==0 else -p*np.log(p)\n return H", "def entropy(img):\n # by calculating\n histogram = img.histogram()\n histogram_size = sum(histogram)\n histogram = [float(h) / histogram_size for h in histogram]\n\n return -sum([p * math.log(p, 2) for p in histogram if p != 0])", "def calc_conditional_entropy(map,data_stat,attribute):\n #acquire the data info of the attribute stored in data_stat\n data_info = data_stat[attribute]\n #acquire the label info\n # label_col = len(data_stat)-1\n label_col = data_stat.keys()[-1]\n # print(data_stat.keys())\n label_info = data_stat[label_col]\n #acquire the data \n data = map[attribute]\n labels = map[label_col]\n conditional_entropy =0\n for data_type in data_info:\n specific_entropy = 0\n for label_type in 
label_info: \n #attribute data indices where all data entries are equal to a speicifc value\n data_with_spec_val_idx = data_info[data_type]\n #label indices where all labels are of same value\n spec_label_idx = label_info[label_type]\n #the intersection of the two indices above\n intersect_idx = np.intersect1d(data_with_spec_val_idx,spec_label_idx)\n #conditional probability of label being of specific value given speicific data value\n temp_prob = len(intersect_idx)/float(len(data_with_spec_val_idx))\n if temp_prob!=0:\n specific_entropy += temp_prob*math.log(temp_prob,2)\n specific_entropy = -specific_entropy\n prob = len(data_with_spec_val_idx)/float(len(data))\n conditional_entropy += prob * specific_entropy\n return conditional_entropy", "def entropy(self):\r\n return 1/2 * (self.dim * (_LOG_2PI + 1) + self._log_det_cov)", "def conditional_entropy(P):\n P_nan = P.copy()\n P_nan[P_nan == 0] = np.nan\n\n marginals = np.nansum(P_nan, axis=1)\n P_cond = P_nan / marginals[:, None]\n\n return np.nansum(np.multiply(P_nan, np.log2(1 / P_cond)))", "def entropy(y):\r\n\r\n # INSERT YOUR CODE HERE\r\n value, count = np.unique(y,return_counts = True)\r\n Hy = 0.0\r\n prob = count.astype(float)/len(y)\r\n for p in prob:\r\n Hy += -(p)*(np.log2(p))\r\n return Hy\r\n raise Exception('Function not yet implemented!')", "def entropy(dist):\n #dist = array([max(d,1e-100) for d in dist])\n dist = dist + 1e-20\n return dot(dist,(log(1.0/dist) * (1.0/log(2.0))).T)", "def entropy(data):\n n, m = np.shape(data)\n data = np.tanh(data)\n data = data / np.sum(data, axis=0)\n a = data * 1.0\n a[np.where(data == 0)] = 0.000001\n\n e = (-1.0 / np.log(n)) * np.sum(data * np.log(a), axis=0)\n w = (1 - e) / np.sum(1 - e)\n return w", "def _cal_igr(x, y):\n return (_cal_entropy(y) - _cal_conditionalEnt(x, y)) / _cal_conditionalEnt(x, y)", "def entropy(temp,pres):\n g_t = liq_g(1,0,temp,pres)\n s = -g_t\n return s", "def entropy(y):\n EPS = 0.0005\n\n # YOUR CODE HERE\n if len(y) == 0:\n return 0.\n \n pk = np.mean(y, axis=0)\n \n return - np.sum(pk * np.log(pk + EPS))", "def entropy(data):\n e = 0\n\n counter = collections.Counter(data)\n l = len(data)\n for count in counter.values():\n p_x = count / l\n e += - p_x * math.log2(p_x)\n\n return e", "def H(self, data):\n entropy = 0\n\n if not data:\n return entropy\n\n for x in range(256):\n p_x = float(data.count(chr(x))) / len(data)\n if p_x > 0:\n entropy -= p_x * math.log(p_x, 2)\n\n return entropy", "def _entropy(data):\n hist = np.array(PIL.Image.fromarray(data).histogram())\n hist = hist / hist.sum()\n hist = hist[hist != 0]\n return -np.sum(hist * np.log2(hist))", "def conditional_entropy(x, y, bins, normalize=False):\n \n # get the bins\n bins = get_2D_bins(x, y, bins)\n \n # calculate H(x,y) and H(y)\n hjoint = joint_entropy(x,y,bins)\n hy = entropy(y, bins[1])\n\n if normalize:\n normalizer = entropy(x, bins[0])\n conditional_entropy = hjoint - hy\n\n # check if conditional entropy and normalizer are very small\n if conditional_entropy < 1e-4 and normalizer < 1e-4:\n # return zero to prevent very high values of normalized conditional entropy\n # e.g. 
conditional entropy = -1.3e-12, normalizer = -1.6e-12 \n # -> normalized conditional entropy = 812.5\n return 0\n else:\n return conditional_entropy / normalizer\n else:\n return hjoint - hy", "def div(self):\n freqList = [i / sum(self.has.values()) for i in self.has.values()]\n entropies = [i * math.log(i, 2) for i in freqList]\n entropy = -sum(entropies)\n return entropy", "def crossEntropy(p_m1):\n p_m2 = 1 - p_m1\n D = - p_m1*math.log(p_m1) - p_m2*math.log(p_m2)\n return D", "def entropy(message):\n message = letter_freq(message)\n n = sum(message.values())\n h = 0\n for n_i in message.values():\n p_i = n_i / n\n h += -p_i * log2(p_i)\n return h" ]
[ "0.75129586", "0.74474317", "0.7124733", "0.70022875", "0.6818115", "0.6687272", "0.66868997", "0.6578242", "0.6539494", "0.6472258", "0.6470226", "0.64497244", "0.63971204", "0.63714343", "0.6354874", "0.63437366", "0.6324555", "0.6300499", "0.62546587", "0.6242879", "0.62109745", "0.61950696", "0.6186942", "0.61838675", "0.6169737", "0.61653996", "0.61446095", "0.6135737", "0.6133536", "0.6132925" ]
0.85797334
0
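The pair above implements ce = H(f1) - I(f1;f2) via `ee.entropyd` and `ee.midd`, entropy and mutual-information estimators from an external module not shown in the record. A rough plug-in equivalent over empirical counts, using the identity H(f1|f2) = H(f1,f2) - H(f2) — a sketch, not the estimators the record actually calls:

```python
import math
from collections import Counter

def entropy_d(xs):
    """Plug-in Shannon entropy (base 2) of a discrete sequence."""
    n = len(xs)
    return -sum((c / n) * math.log2(c / n) for c in Counter(xs).values())

def conditional_entropy(f1, f2):
    """H(f1 | f2) = H(f1, f2) - H(f2), equal to H(f1) - I(f1; f2)."""
    return entropy_d(list(zip(f1, f2))) - entropy_d(list(f2))

# 0 bits when f2 determines f1; H(f1) bits when they are independent.
print(conditional_entropy([0, 0, 1, 1], [0, 0, 1, 1]))  # 0.0
print(conditional_entropy([0, 1, 0, 1], [0, 0, 1, 1]))  # 1.0
```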
This function calculates the symmetrical uncertainty, where su(f1,f2) = 2IG(f1,f2)/(H(f1)+H(f2))
def su_calculation(f1, f2): # calculate information gain of f1 and f2, t1 = ig(f1, f2) t1 = information_gain(f1, f2) # calculate entropy of f1 t2 = ee.entropyd(f1) # calculate entropy of f2 t3 = ee.entropyd(f2) su = 2.0 * t1 / (t2 + t3) return su
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def uncertainty_mm(self,m1,m2):\n # ID and isolation uncertainty (TO BE FIXED)\n unc = (self._muIDISOWeight.value(m1.pt(),m1.eta(),'+1')/self._muIDISOWeight.value(m1.pt(),m1.eta(),'+1')+ \\\n self._muIDISOWeight.value(m2.pt(),m2.eta(),'+1')/self._muIDISOWeight.value(m2.pt(),m2.eta(),'+1'))**2\n \n## # trigger (approximate) ==== FIXME!! ===============\n## hlt_sf_run2011_a_unc = (self._mu7TrgWeight [(m1.pt(),m1.eta())][1]/self._mu7TrgWeight [(m1.pt(),m1.eta())][0] + \\\n## self._mu7TrgWeight [(m2.pt(),m2.eta())][1]/self._mu7TrgWeight [(m2.pt(),m2.eta())][0])**2\n## hlt_sf_run2011_b_unc = (abs(self._mu8Trg_Mu13Mu8_Weight[(m1.pt(),m1.eta())][0]*self._mu13Trg_Mu13Mu8_Weight[(m2.pt(),m2.eta())][1]+ \\\n## self._mu13Trg_Mu13Mu8_Weight[(m1.pt(),m1.eta())][1]*self._mu8Trg_Mu13Mu8_Weight[(m2.pt(),m2.eta())][0]- \\\n## self._mu13Trg_Mu13Mu8_Weight[(m1.pt(),m1.eta())][1]*self._mu13Trg_Mu13Mu8_Weight[(m2.pt(),m2.eta())][0]- \\\n## self._mu13Trg_Mu13Mu8_Weight[(m1.pt(),m1.eta())][0]*self._mu13Trg_Mu13Mu8_Weight[(m2.pt(),m2.eta())][1])/ \\\n## (self._mu8Trg_Mu13Mu8_Weight[(m1.pt(),m1.eta())][0]*self._mu13Trg_Mu13Mu8_Weight[(m2.pt(),m2.eta())][0]+ \\\n## self._mu13Trg_Mu13Mu8_Weight[(m1.pt(),m1.eta())][0]*self._mu8Trg_Mu13Mu8_Weight[(m2.pt(),m2.eta())][0]- \\\n## self._mu13Trg_Mu13Mu8_Weight[(m1.pt(),m1.eta())][0]*self._mu13Trg_Mu13Mu8_Weight[(m2.pt(),m2.eta())][0]))**2\n## hlt_sf_run2011_b_unc += ((self._mu8Trg_Mu13Mu8_Weight[(m1.pt(),m1.eta())][1]*self._mu13Trg_Mu13Mu8_Weight[(m2.pt(),m2.eta())][0]+ \\\n## self._mu13Trg_Mu13Mu8_Weight[(m1.pt(),m1.eta())][0]*self._mu8Trg_Mu13Mu8_Weight[(m2.pt(),m2.eta())][1])/ \\\n## (self._mu8Trg_Mu13Mu8_Weight[(m1.pt(),m1.eta())][0]*self._mu13Trg_Mu13Mu8_Weight[(m2.pt(),m2.eta())][0]+ \\\n## self._mu13Trg_Mu13Mu8_Weight[(m1.pt(),m1.eta())][0]*self._mu8Trg_Mu13Mu8_Weight[(m2.pt(),m2.eta())][0]- \\\n## self._mu13Trg_Mu13Mu8_Weight[(m1.pt(),m1.eta())][0]*self._mu13Trg_Mu13Mu8_Weight[(m2.pt(),m2.eta())][0]))**2\n## hlt_sf_run2011_c_unc = (abs(self._mu8Trg_Mu17Mu8_Weight[(m1.pt(),m1.eta())][0]*self._mu17Trg_Mu17Mu8_Weight[(m2.pt(),m2.eta())][1]+ \\\n## self._mu17Trg_Mu17Mu8_Weight[(m1.pt(),m1.eta())][1]*self._mu8Trg_Mu17Mu8_Weight[(m2.pt(),m2.eta())][0]- \\\n## self._mu17Trg_Mu17Mu8_Weight[(m1.pt(),m1.eta())][1]*self._mu17Trg_Mu17Mu8_Weight[(m2.pt(),m2.eta())][0]- \\\n## self._mu17Trg_Mu17Mu8_Weight[(m1.pt(),m1.eta())][0]*self._mu17Trg_Mu17Mu8_Weight[(m2.pt(),m2.eta())][1])/ \\\n## (self._mu8Trg_Mu17Mu8_Weight[(m1.pt(),m1.eta())][0]*self._mu17Trg_Mu17Mu8_Weight[(m2.pt(),m2.eta())][0]+ \\\n## self._mu17Trg_Mu17Mu8_Weight[(m1.pt(),m1.eta())][0]*self._mu8Trg_Mu17Mu8_Weight[(m2.pt(),m2.eta())][0]- \\\n## self._mu17Trg_Mu17Mu8_Weight[(m1.pt(),m1.eta())][0]*self._mu17Trg_Mu17Mu8_Weight[(m2.pt(),m2.eta())][0]))**2\n## hlt_sf_run2011_c_unc += ((self._mu8Trg_Mu17Mu8_Weight[(m1.pt(),m1.eta())][1]*self._mu17Trg_Mu17Mu8_Weight[(m2.pt(),m2.eta())][0]+ \\\n## self._mu17Trg_Mu17Mu8_Weight[(m1.pt(),m1.eta())][0]*self._mu8Trg_Mu17Mu8_Weight[(m2.pt(),m2.eta())][1])/ \\\n## (self._mu8Trg_Mu17Mu8_Weight[(m1.pt(),m1.eta())][0]*self._mu17Trg_Mu17Mu8_Weight[(m2.pt(),m2.eta())][0]+ \\\n## self._mu17Trg_Mu17Mu8_Weight[(m1.pt(),m1.eta())][0]*self._mu8Trg_Mu17Mu8_Weight[(m2.pt(),m2.eta())][0]- \\\n## self._mu17Trg_Mu17Mu8_Weight[(m1.pt(),m1.eta())][0]*self._mu17Trg_Mu17Mu8_Weight[(m2.pt(),m2.eta())][0]))**2\n## unc += 0.002*hlt_sf_run2011_a_unc + 0.643*hlt_sf_run2011_b_unc + 0.024*hlt_sf_run2011_c_unc\n \n return sqrt(unc)", "def uncertainty_ee(self,e1,e2):\n # reco\n unc = 
(self._eleRecoWeight[(e1.pt(),e1.eta())][1]/self._eleRecoWeight[(e1.pt(),e1.eta())][0] + \\\n self._eleRecoWeight[(e2.pt(),e2.eta())][1]/self._eleRecoWeight[(e2.pt(),e2.eta())][0])**2\n # id-isolation\n unc += (self._eleIdIsoWeight[(e1.pt(),e1.eta())][1]/self._eleIdIsoWeight[(e1.pt(),e1.eta())][0] + \\\n self._eleIdIsoWeight[(e2.pt(),e2.eta())][1]/self._eleIdIsoWeight[(e2.pt(),e2.eta())][0])**2\n # trigger (approximate)\n unc += (abs(self._ele8TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][1]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][1]*self._ele8TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][1]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][1])/ \\\n (self._ele8TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele8TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]))**2\n unc += ((self._ele8TrgWeight[(e1.pt(),e1.eta())][1]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele8TrgWeight[(e2.pt(),e2.eta())][1])/ \\\n (self._ele8TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele8TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]))**2\n #outcome\n return sqrt(unc)", "def hellinger(mu1, sigma1, mu2, sigma2, s=0.5):\n sigma1inv = np.linalg.inv(sigma1)\n sigma2inv = np.linalg.inv(sigma2)\n sigma1inv_sigma2 = np.dot(sigma1inv, sigma2)\n sigma2inv_sigma1 = np.dot(sigma2inv, sigma1)\n N = sigma1.shape[0]\n I = np.diag(np.ones(N))\n d = np.linalg.det(s*I+(1-s)*sigma1inv_sigma2)**(-s/2) *\\\n np.linalg.det((1-s)*I+s*sigma2inv_sigma1)**(-(1-s)/2) *\\\n np.exp(0.5*(_maha(s*np.dot(sigma2inv, mu2) + (1-s) *\\\n np.dot(sigma1inv, mu1), s*sigma2inv + (1-s)*sigma1inv) -\\\n s * _maha(mu2,sigma2)-(1-s)*_maha(mu1,sigma1)))\n return d", "def euc_dist(self, squared=True):", "def calc_uncertainty_reduction(trains_a, trains_b, metric, z, tau):\n\n return calc_mutual_information(calc_probability_matrix(\n trains_a, trains_b, metric, tau, z)) / calc_stimuli_entropy()", "def hellinger_weighted(mu1, sigma1, pi1, mu2, sigma2, pi2):\n sigma1norm = np.linalg.norm(sigma1)\n sigma2norm = np.linalg.norm(sigma2)\n X0 = np.zeros(mu1.shape)\n i = 2 * (sigma1norm**(1.0/4)) * (sigma2norm**(1.0/4)) * np.sqrt(2*np.pi) *\\\n gmm.mulnormpdf(X0, mu1-mu2, 2*sigma1 + 2*sigma2)\n #return np.sqrt(pi1*pi2) * (1-2*i)\n return 1-i[0]", "def chi2(u,v):\n\n u[u==0] = 1e-6\n v[v==0] = 1e-6\n r = np.sum(((u-v)**2).astype(np.float)/(u+v))\n\n # m = (u != 0) & (v != 0)\n # r = np.sum(((u[m]-v[m])**2).astype(np.float)/(u[m]+v[m]))\n\n # r = np.nansum(((u-v)**2).astype(np.float)/(u+v))\n return r", "def hchg(x, a1, a2, mu1, mu2):\n a = a1 + a2\n j = np.arange(250)\n if np.isscalar(x):\n x = np.array([x])\n x = x[:, np.newaxis]\n \n out = (mu1 * x) ** j / sp.factorial(j)\n out *= sp.poch(a1+a2, j) / sp.poch(a1, j)\n out *= sp.hyp1f1(a1+a2+j, a2, mu2*(1-x))\n out = out.sum(axis=1)\n return out if out.size > 1 else float(out)", "def IOU(s1, e1, s2, e2):\r\n if (s2 > e1) or (s1 > e2):\r\n return 0\r\n Aor = max(e1, e2) - min(s1, s2)\r\n Aand = min(e1, e2) - max(s1, s2)\r\n return float(Aand) / Aor", "def symdif(f1, f2):\n\n y = 
union(subm(f1,f2),subm(f2,f1))\n return y", "def relative_l2_error(u, U):\n return l2(u - U) / l2(u)", "def uncertainty_heisenberg(uncertainty=1,symb=deltap, units=SI):\n\n var1 = sy.var(symb)\n var2 = sy.var('hbar')\n var = var1,var2\n par = uncertainty,units['hbar']\n\n y = var2 / var1\n\n return dic_result(var,par,y)", "def calc_uncertainty(self):\n y = self.y\n y_true = self.y_true\n j_lim = self.j_lim\n Nj = self.Nj\n if issubclass(y.dtype.type, np.integer):\n # Categorial: percentage of wrong classes\n uncertainty_global = np.count_nonzero(y_true != y)/self.N\n uncertainty_group = np.empty(self.J)\n for j in range(self.J):\n uncertainty_group[j] = (\n np.count_nonzero(\n y_true[j_lim[j]:j_lim[j+1]] != y[j_lim[j]:j_lim[j+1]]\n ) / Nj[j]\n )\n else:\n # Continuous: R squared\n sst = np.sum(np.square(y - np.mean(y)))\n sse = np.sum(np.square(y - y_true))\n uncertainty_global = 1 - sse/sst\n uncertainty_group = np.empty(self.J)\n for j in range(self.J):\n sst = np.sum(np.square(\n y[j_lim[j]:j_lim[j+1]] - np.mean(y[j_lim[j]:j_lim[j+1]])\n ))\n sse = np.sum(np.square(\n y[j_lim[j]:j_lim[j+1]] - y_true[j_lim[j]:j_lim[j+1]]\n ))\n uncertainty_group[j] = 1 - sse/sst\n return uncertainty_global, uncertainty_group", "def chi2s(h1s, h2s):\n return np.sum((h1s-h2s)**2/(h1s+h2s+1e-10), axis=1)", "def ws06(adp1, adp2):\n # print sum(adp1[:3])/3. - sum(adp2[:3])/3.\n adp1 = get_matrix(adp1)\n adp2 = get_matrix(adp2)\n adp1i = np.linalg.inv(adp1)\n adp2i = np.linalg.inv(adp2)\n a = 2 ** 1.5\n b = np.dot(adp1i, adp2i)\n c = np.linalg.det(b)\n\n # if c <= 0:\n # c *= -1\n d = c ** 0.25\n up = a * d\n\n x = adp1i + adp2i\n y = np.linalg.det(x)\n # if y <= 0:\n # y *= -1\n z = y ** 0.5\n R = up / z\n return 100 * (1 - R)", "def hg1f2(Mu,Y):\n return float(mpmath.hyp1f2(0.5,2,Mu,-Y**2))", "def wass_gaussians(mu1, mu2, Sigma1, Sigma2):\n d = mu1.shape[0]\n if d == 1:\n w2 = (mu1 - mu2)**2 + (np.sqrt(Sigma1) - np.sqrt(Sigma2))**2\n else:\n prodSigmas = Sigma2**(1/2)*Sigma1*Sigma2**(1/2)\n w2 = np.linalg.norm(mu1 - mu2)**2 + np.trace(Sigma1 + Sigma2 - 2*(prodSigmas)**(1/2))\n return np.sqrt(w2)", "def egim_hesapla(x1, y1, x2, y2):\n\tsonuc = (y2 - y1) / (x2 - x1)\n\tprint float(sonuc)", "def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6,\n use_torch=False):\n\n if use_torch:\n assert mu1.shape == mu2.shape, \\\n 'Training and test mean vectors have different lengths'\n assert sigma1.shape == sigma2.shape, \\\n 'Training and test covariances have different dimensions'\n\n diff = mu1 - mu2\n # Run 50 itrs of newton-schulz to get the matrix sqrt of\n # sigma1 dot sigma2\n covmean = sqrt_newton_schulz(sigma1.mm(sigma2).unsqueeze(0), 50)\n if torch.any(torch.isnan(covmean)):\n return float('nan')\n covmean = covmean.squeeze()\n out = (diff.dot(diff) +\n torch.trace(sigma1) +\n torch.trace(sigma2) -\n 2 * torch.trace(covmean)).cpu().item()\n else:\n mu1 = np.atleast_1d(mu1)\n mu2 = np.atleast_1d(mu2)\n\n sigma1 = np.atleast_2d(sigma1)\n sigma2 = np.atleast_2d(sigma2)\n\n assert mu1.shape == mu2.shape, \\\n 'Training and test mean vectors have different lengths'\n assert sigma1.shape == sigma2.shape, \\\n 'Training and test covariances have different dimensions'\n\n diff = mu1 - mu2\n\n # Product might be almost singular\n covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)\n if not np.isfinite(covmean).all():\n msg = ('fid calculation produces singular product; '\n 'adding %s to diagonal of cov estimates') % eps\n print(msg)\n offset = np.eye(sigma1.shape[0]) * eps\n covmean = 
linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))\n\n # Numerical error might give slight imaginary component\n if np.iscomplexobj(covmean):\n if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):\n m = np.max(np.abs(covmean.imag))\n raise ValueError('Imaginary component {}'.format(m))\n covmean = covmean.real\n\n tr_covmean = np.trace(covmean)\n\n out = (diff.dot(diff) +\n np.trace(sigma1) +\n np.trace(sigma2) -\n 2 * tr_covmean)\n return out", "def _U_func_numpy(x1, y1, x2, y2):\n\n # Return zero if same point\n if x1 == x2 and y1 == y2:\n return 0.\n\n # Calculate the squared Euclidean norm (r^2)\n r_2 = (x2 - x1) ** 2 + (y2 - y1) ** 2\n\n # Return the squared norm (r^2 * log r^2)\n return r_2 * np.log(r_2)", "def test_renyi_values():\n d1 = Distribution(['0', '1'], [0, 1])\n d2 = Distribution(['0', '1'], [1 / 2, 1 / 2])\n d3 = Distribution(['0', '1'], [1, 0])\n\n assert renyi_divergence(d1, d2, 1 / 2) == pytest.approx(np.log2(2))\n assert renyi_divergence(d2, d3, 1 / 2) == pytest.approx(np.log2(2))\n assert renyi_divergence(d1, d3, 1 / 2) == pytest.approx(np.inf)", "def compute_iou_for_contour_pair(contour1: np.ndarray, contour2: np.ndarray):\n im1, im2 = compute_contour_binary_masks(contour1, contour2)\n return (im1 & im2).sum() / (im1 | im2).sum()", "def estimate_uncertainties(self, model, obs, sig, mu=1.0):\n\n \n syn, J = self.synthesize_rf(model, mu=mu)\n\n error = model*0\n ny, nx = error.shape[0:2]\n \n for yy in range(ny):\n for xx in range(nx):\n \n for kk in range(9):\n J[yy,xx,kk] /= sig\n \n\n Hdiag = (J[yy,xx,:]**2).sum(axis=(1,2))\n error[yy,xx,:] = (((obs[yy,xx]-syn[yy,xx]) / sig )**2).sum()\n\n for kk in range(9):\n error[yy,xx,kk] /= Hdiag[kk]\n\n error *= 2.0 / 9.0\n \n return np.sqrt(error)", "def relative_L2_error(u, U, x):\n return L2(lambda x: u(x) - U(x), x) / L2(u, x)", "def _theils_u_compute(confmat: Tensor) ->Tensor:\n confmat = _drop_empty_rows_and_cols(confmat)\n s_xy = _conditional_entropy_compute(confmat)\n total_occurrences = confmat.sum()\n p_x = confmat.sum(0) / total_occurrences\n s_x = -torch.sum(p_x * torch.log(p_x))\n if s_x == 0:\n return torch.tensor(0, device=confmat.device)\n return (s_x - s_xy) / s_x", "def theils_u(x,\n y,\n nan_strategy=_REPLACE,\n nan_replace_value=_DEFAULT_REPLACE_VALUE):\n\n print(x.name + ' to ' + y.name + ' with Theils U')\n\n if nan_strategy == _REPLACE:\n x, y = replace_nan_with_value(x, y, nan_replace_value)\n elif nan_strategy == _DROP:\n x, y = remove_incomplete_samples(x, y)\n\n contingency = pd.crosstab(x, y)\n c, p, dof, expected = ss.chi2_contingency(contingency)\n\n s_xy = conditional_entropy(x, y)\n x_counter = Counter(x)\n total_occurrences = sum(x_counter.values())\n p_x = list(map(lambda n: n / total_occurrences, x_counter.values()))\n s_x = ss.entropy(p_x)\n if s_x == 0:\n return 1, 0\n else:\n return (s_x - s_xy) / s_x, p, r'$U$'", "def information_gain(f1, f2):\n\n ig = ee.entropyd(f1) - conditional_entropy(f1, f2)\n return ig", "def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):\r\n\r\n mu1 = np.atleast_1d(mu1)\r\n mu2 = np.atleast_1d(mu2)\r\n\r\n sigma1 = np.atleast_2d(sigma1)\r\n sigma2 = np.atleast_2d(sigma2)\r\n\r\n assert mu1.shape == mu2.shape, \\\r\n 'Training and test mean vectors have different lengths'\r\n assert sigma1.shape == sigma2.shape, \\\r\n 'Training and test covariances have different dimensions'\r\n\r\n diff = mu1 - mu2\r\n\r\n # Product might be almost singular\r\n covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)\r\n if not 
np.isfinite(covmean).all():\r\n msg = ('fid calculation produces singular product; '\r\n 'adding %s to diagonal of cov estimates') % eps\r\n print(msg)\r\n offset = np.eye(sigma1.shape[0]) * eps\r\n covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))\r\n\r\n # Numerical error might give slight imaginary component\r\n if np.iscomplexobj(covmean):\r\n if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):\r\n m = np.max(np.abs(covmean.imag))\r\n raise ValueError('Imaginary component {}'.format(m))\r\n covmean = covmean.real\r\n\r\n tr_covmean = np.trace(covmean)\r\n\r\n return (diff.dot(diff) + np.trace(sigma1)\r\n + np.trace(sigma2) - 2 * tr_covmean)", "def hamiltonian_mse(fock1, fock2, scale=1.0):\n\n return ((fock1 - fock2)**2).flatten().sum() * scale", "def extended_euclidean(self):\n self.a = gmpy2.invert(self.e1, self.e2)\n self.b = (float(self.gcd(self.e1, self.e2)-(self.a*self.e1)))/float(self.e2)" ]
[ "0.60060245", "0.58210576", "0.5776708", "0.5709511", "0.56543845", "0.5593671", "0.55274516", "0.5487926", "0.54671097", "0.54472774", "0.54185194", "0.53845024", "0.53705823", "0.5368508", "0.53540176", "0.533016", "0.5324873", "0.5292968", "0.52852327", "0.52852046", "0.52554846", "0.52517277", "0.52427256", "0.52146757", "0.52142894", "0.5209174", "0.52091026", "0.5197996", "0.51967734", "0.5189083" ]
0.69187057
0
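Same caveat as above: the record's `ee.entropyd` is an external estimator. A self-contained plug-in version of the SU formula from the query, using I(f1;f2) = H(f1) + H(f2) - H(f1,f2) for the information-gain term:

```python
import math
from collections import Counter

def _h(xs):
    """Plug-in Shannon entropy (base 2) of a discrete sequence."""
    n = len(xs)
    return -sum((c / n) * math.log2(c / n) for c in Counter(xs).values())

def su_calculation(f1, f2):
    """SU(f1, f2) = 2 * IG(f1, f2) / (H(f1) + H(f2)), bounded in [0, 1]."""
    h1, h2 = _h(f1), _h(f2)
    ig = h1 + h2 - _h(list(zip(f1, f2)))  # I(f1; f2) = H(f1) + H(f2) - H(f1, f2)
    return 2.0 * ig / (h1 + h2) if (h1 + h2) > 0 else 0.0

print(su_calculation([0, 0, 1, 1], [0, 0, 1, 1]))  # 1.0 (identical features)
print(su_calculation([0, 1, 0, 1], [0, 0, 1, 1]))  # 0.0 (independent features)
```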
Chooses the appropriate RPC class based on the address format. Does not work for EtherscanRPC because there is no good way to check whether an api_key is valid. If you know you have an API key for Etherscan, instantiate the EtherscanRPC client directly.
def rpc_factory(address, verbose): if not isinstance(address, str): raise RPCError('The address must be a string: {!r}'.format(address)) if _os.path.exists(address) and _stat.S_ISSOCK(_os.stat(address).st_mode): return IPCRPC(address, verbose) elif _HTTP.match(address): return HTTPRPC(address, verbose) else: raise RPCError('Can\'t match address format to an RPC class.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_str(cls, address: str) -> Optional[Address]:\n if len(address) < 26 or len(address) > 35:\n return None\n # decode\n data = base58_decode(address)\n if data is None or len(data) != 25:\n return None\n # check code\n prefix = data[:21]\n suffix = data[21:]\n if check_code(prefix) == suffix:\n network = ord(data[:1])\n return cls(address=address, network=network)", "def from_str(cls, address: str) -> Optional[Address]:\n if is_eth(address=address):\n return cls(address=address)", "def get_client_class(api_name, version, version_map):\r\n try:\r\n client_path = version_map[str(version)]\r\n except (KeyError, ValueError):\r\n msg = _(\"Invalid %(api_name)s client version '%(version)s'. must be \"\r\n \"one of: %(map_keys)s\")\r\n msg = msg % {'api_name': api_name, 'version': version,\r\n 'map_keys': ', '.join(version_map.keys())}\r\n raise exceptions.UnsupportedVersion(msg)\r\n\r\n return import_class(client_path)", "def connect_api(class_type: str = \"service\", creds: dict = None):\n if class_type.lower() == \"service\":\n falcon_api = IOC(creds=creds)\n elif class_type.lower() == \"uber\":\n falcon_api = APIHarness(creds=creds)\n\n return falcon_api", "def getServiceFromAddress(address: str) -> Union[RequestSynchroneExtensionEscrowService, None]:\n pass", "def get_address(supvisors, strategy, addresses, expected_loading):\n if strategy == StartingStrategies.CONFIG:\n instance = ConfigStrategy(supvisors)\n if strategy == StartingStrategies.LESS_LOADED:\n instance = LessLoadedStrategy(supvisors)\n if strategy == StartingStrategies.MOST_LOADED:\n instance = MostLoadedStrategy(supvisors)\n # apply strategy result\n return instance.get_address(addresses, expected_loading)", "def register_instance(self, address, display_name=None, serverkey=None,\n vcpagentkey=None):\n _log.debug('register_instance called via RPC')\n\n parsed = urlparse(address)\n\n valid_schemes = ('http', 'https', 'tcp', 'ipc')\n if parsed.scheme not in valid_schemes:\n raise ValueError('Unknown scheme specified {} valid schemes are {}'\n .format(parsed.scheme, valid_schemes))\n\n if parsed.scheme in ('http', 'https'):\n self._register_instance(address,\n display_name=display_name)\n elif parsed.scheme == 'tcp':\n if not serverkey or len(serverkey) != 43: # valid publickey length\n raise ValueError(\n \"tcp addresses must have valid serverkey provided\")\n self.register_platform(address, serverkey, display_name)\n elif parsed.scheme == 'ipc':\n self.register_platform(address, display_name=display_name)", "def __init__(self, conn, iTag, srvType, addr):\r\n self._addr = addr\r\n\r\n args = srvType.split('/')\r\n\r\n if len(args) != 2:\r\n raise ValueError('Service type is not valid. Has to be of the '\r\n 'form pkg/srv, i.e. 
std_msgs/Int8.')\r\n\r\n self._srvCls = conn.loader.loadSrv(*args)\r\n self._srvCls._request_class = rospy.AnyMsg\r\n self._srvCls._response_class = rospy.AnyMsg\r\n\r\n super(ROSServiceProvider, self).__init__(conn, iTag, srvType,\r\n self._rceCB, ())", "def _address_type(self, address):\n parsed_type = None\n parsed = urlparse.urlparse(address)\n if parsed.scheme not in ('http', 'https', 'ipc', 'tcp'):\n raise ValueError('Invalid volttron central address.')\n\n return parsed.scheme", "def from_string(cls, address: str) -> 'PublicKey':\n if len(address) != 56:\n raise ValueError(\"address format not supported\")\n\n if address[0] != \"G\":\n raise ValueError(\"provided address is not a public key\")\n\n return cls(kin_utils.is_valid_address(address))", "def __init__(self, address, type,):\n self.address = address\n self.type = type", "def _json_to_obj(cls, serialized_str):\n\n ret = None\n json_response = json.loads(serialized_str)\n\n # Creating a deep copy just in case later we want the original resp\n json_dict = copy.deepcopy(json_response)\n\n # Replacing attribute response names if they are Python reserved words\n # with a trailing underscore, for ex. id for id_ or if they have a\n # special character within the name replacing it for an underscore too\n json_dict = cls._replace_dict_key(\n json_dict, 'id', 'id_', recursion=True)\n\n if cls.PORT in json_dict:\n subnet_dict = json_dict.get(cls.PORT)\n ret = Port(**subnet_dict)\n return ret", "def _make_proto_resolve(self, addr: 'IPv4Address | IPv6Address | str | bytes', ptype: 'int') -> 'bytes':\n if ptype == Enum_EtherType.Internet_Protocol_version_4:\n return ipaddress.IPv4Address(addr).packed\n if ptype == Enum_EtherType.Internet_Protocol_version_6:\n return ipaddress.IPv6Address(addr).packed\n\n if isinstance(addr, str):\n return addr.encode()\n if isinstance(addr, (ipaddress.IPv4Address, ipaddress.IPv6Address)):\n return addr.packed\n return addr", "def buildProtocol(addr):", "def open(address):\n\n method, path = address.split(':', 1)\n\n if method == 'unix':\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.connect(path)\n return sock\n\n if method == 'serial':\n ser = serial.Serial(path, 2400)\n return SerialWrapper(ser)\n\n if method == 'null':\n return NullDevice()\n\n raise ValueError('{} is not a valid method'.format(method))", "def __init__(self, rpc, name):\n self.rpc = rpc\n self.name = name", "def test_lookupChecksClass(self):\n badClass = Record_A('10.0.0.1')\n badClass.CLASS = HS\n servers = {\n ('1.1.2.3', 53): {\n ('foo.example.com', A): {\n 'answers': [('foo.example.com', badClass)],\n 'authority': [('foo.example.com', Record_NS('ns1.example.com'))],\n 'additional': [('ns1.example.com', Record_A('10.0.0.2'))],\n },\n },\n ('10.0.0.2', 53): {\n ('foo.example.com', A): {\n 'answers': [('foo.example.com', Record_A('10.0.0.3'))],\n },\n },\n }\n resolver = self._getResolver(servers)\n d = resolver.lookupAddress('foo.example.com')\n d.addCallback(getOnePayload)\n d.addCallback(self.assertEqual, Record_A('10.0.0.3'))\n return d", "def fromBytes(klass, bytes):\n assert klass.family is not None\n\n try:\n address = socket.inet_ntop(klass.family, bytes)\n\n except (socket.error, TypeError):\n raise ValueError(\"Invalid {0.__name__}\".format(klass), bytes)\n\n return klass(address)", "def __init__(self, api_key=None, secret_key=None, headers=None):\n # set up base requester\n self._base_requester = Requester(API_ENDPOINT, api_key=api_key, secret_key=secret_key, headers=headers)\n # add each endpoint\n 
self.geocode = self.Geocode(self._base_requester)\n self.places = self.Places(self._base_requester)", "def get_api(self, ranger_client, api_name):\n if api_name == \"create_policy\":\n return ranger_client.create_policy\n elif api_name == \"delete_policy_by_id\":\n return ranger_client.delete_policy_by_id\n elif api_name == \"get_policy_by_id\":\n return ranger_client.get_policy_by_id\n elif api_name == \"update_policy_by_id\":\n return ranger_client.update_policy_by_id\n else:\n raise Exception(f\"Unknown API name: {api_name}\")", "def __init__(self, net_type='fcnet'):\n net_type = net_type.lower()\n if net_type == 'fcnet':\n from network.starnet_com_process import Promoter as FCNet\n self.__method = FCNet()\n else:\n raise AssertionError('Cannot find network type that matches {}.'.format(net_type))", "def __init__(self, rpc):\n self.rpc = rpc", "def make_client(instance):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n instance._api_version[API_NAME],\r\n API_VERSIONS,\r\n )\r\n instance.initialize()\r\n url = instance._url\r\n url = url.rstrip(\"/\")\r\n if '2.0' == instance._api_version[API_NAME]:\r\n client = neutron_client(username=instance._username,\r\n tenant_name=instance._tenant_name,\r\n password=instance._password,\r\n region_name=instance._region_name,\r\n auth_url=instance._auth_url,\r\n endpoint_url=url,\r\n token=instance._token,\r\n auth_strategy=instance._auth_strategy,\r\n insecure=instance._insecure,\r\n ca_cert=instance._ca_cert)\r\n return client\r\n else:\r\n raise exceptions.UnsupportedVersion(_(\"API version %s is not \"\r\n \"supported\") %\r\n instance._api_version[API_NAME])", "def __init__(self, conn, iTag, srvType, addr):\r\n self._service = None\r\n self._addr = addr\r\n self._lock = Lock()\r\n self._pending = set()\r\n\r\n args = srvType.split('/')\r\n\r\n if len(args) != 2:\r\n raise ValueError('Service type is not valid. Has to be of the '\r\n 'form pkg/srv, i.e. 
std_msgs/Int8.')\r\n\r\n self._srvCls = conn.loader.loadSrv(*args)\r\n self._srvCls._request_class = rospy.AnyMsg\r\n self._srvCls._response_class = rospy.AnyMsg\r\n\r\n super(ROSServiceClient, self).__init__(conn, iTag, srvType)", "def api_factory(config):\n return SdkApi(config.get('apiKey'),\n sdk_api_base_url=config['sdkApiBaseUrl'],\n events_api_base_url=config['eventsApiBaseUrl'],\n split_sdk_machine_name=config['splitSdkMachineName'],\n split_sdk_machine_ip=config['splitSdkMachineIp'],\n connect_timeout=config['connectionTimeout'],\n read_timeout=config['readTimeout'])", "def create_and_verify(cls, api_key: Optional[str] = None, **params) -> \"Address\":\n requestor = Requestor(local_api_key=api_key)\n url = \"%s/%s\" % (cls.class_url(), \"create_and_verify\")\n\n wrapped_params = {cls.snakecase_name(): params}\n response, api_key = requestor.request(method=RequestMethod.POST, url=url, params=wrapped_params)\n\n return convert_to_easypost_object(response=response[\"address\"], api_key=api_key)", "def __init__(\n self, name: str = \"\", protocol: int | None = None, **kwargs: Any\n ) -> None:\n\n super().__init__(name=name, **kwargs)\n if not ipaddress:\n raise SoftDependencyError(\"ipaddress\")\n if protocol not in [None, 4, 6]:\n raise ValueError(\"IpAddress protocol needs to be either 4, 6 or None\")\n self.protocol = protocol", "def __init__(self, address=('', 50000), authkey=b'tradingbot'):\n _ClientBot.__init__(self, address=address, authkey=authkey)", "def _read_proto_resolve(self, addr: 'bytes', ptype: 'int') -> 'str | IPv4Address | IPv6Address':\n if ptype == Enum_EtherType.Internet_Protocol_version_4: # IPv4\n return ipaddress.ip_address(addr)\n if ptype == Enum_EtherType.Internet_Protocol_version_6: # IPv6\n return ipaddress.ip_address(addr)\n return addr.hex()", "def buildProtocol(self, address):\n # Reject this connection if the IP is banned.\n ban = self.ip_bans.get(address.host)\n if ban and ban.hard:\n logger.verbose(\"Rejecting connection from banned IP {0}\".format(address.host))\n # This will send a RST packet\n return None\n # otherwise all good\n logger.verbose(\"Incoming SSH connection from {0.host}:{0.port}\".format(address))\n\n # Let our superclass do the rest\n transport = conch_factory.SSHFactory.buildProtocol(self, address)\n\n if ban:\n def disconnect():\n transport.sendDisconnect(1, \"You are banned from this server.\")\n transport.sendKexInit = disconnect\n return transport\n\n # Register the transport for the watchdog\n self.watchdog.add(transport)\n\n # Fix for Twisted bug? supportedPublicKeys is a dict_keys object,\n # but Twisted tries to use it as a sequence. Convert it to a list.\n transport.supportedPublicKeys = list(transport.supportedPublicKeys)\n\n return transport" ]
[ "0.571346", "0.5449916", "0.52416897", "0.5139773", "0.5136199", "0.5135899", "0.51007897", "0.5040956", "0.49199766", "0.47267127", "0.4723684", "0.46901482", "0.46455255", "0.4626003", "0.4620554", "0.46186823", "0.45921576", "0.45775348", "0.4561937", "0.45299584", "0.45271888", "0.45031536", "0.45028943", "0.44950554", "0.44939646", "0.44884053", "0.44853905", "0.447996", "0.4467161", "0.44610098" ]
0.6459828
0
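The `rpc_factory` pair above references a module-level `_HTTP` regex and the `IPCRPC`/`HTTPRPC` classes without showing them. A standalone sketch of the same dispatch, with the regex and minimal stub classes filled in as assumptions:

```python
import os
import re
import stat

_HTTP = re.compile(r"^https?://")  # assumed form of the regex from the record

class RPCError(Exception):
    pass

class IPCRPC:
    def __init__(self, address, verbose):
        self.address, self.verbose = address, verbose

class HTTPRPC:
    def __init__(self, address, verbose):
        self.address, self.verbose = address, verbose

def rpc_factory(address, verbose=False):
    """Pick an RPC client class from the shape of the address."""
    if not isinstance(address, str):
        raise RPCError("The address must be a string: {!r}".format(address))
    # An existing Unix domain socket selects the IPC transport.
    if os.path.exists(address) and stat.S_ISSOCK(os.stat(address).st_mode):
        return IPCRPC(address, verbose)
    # An http(s) URL selects the HTTP transport.
    if _HTTP.match(address):
        return HTTPRPC(address, verbose)
    raise RPCError("Can't match address format to an RPC class.")
```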
Locates the specified data files and returns the matches in a data_files compatible format. source is the root of the source data tree. Use '' or '.' for the current directory. target is the root of the target data tree. Use '' or '.' for the distribution directory. patterns is a sequence of glob patterns for the files you want to copy.
def find_data_files(source, target, patterns): if glob.has_magic(source) or glob.has_magic(target): raise ValueError("Magic not allowed in src, target") ret = {} for pattern in patterns: pattern = os.path.join(source, pattern) for filename in glob.glob(pattern): if os.path.isfile(filename): targetpath = os.path.join(target, os.path.relpath(filename,source)) path = os.path.dirname(targetpath) ret.setdefault(path, []).append(filename) return sorted(ret.items())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _DataSourceFromFilePattern(self,\n file_pattern,\n input_source_weights=None,\n **extra_input_kwargs):\n del input_source_weights # Unused.\n return py_utils.NestedMap(data=tf.constant(file_pattern))", "def copy_files(step: BuildStep,\n patterns: List[str],\n target: str = None) -> None:\n log = step.get_logger()\n if target:\n log(f\"Copying files into build folder under {target}\")\n else:\n log(\"Copying files into build folder\")\n log(f\"Copy src={step.source_path} to build={step.build_path} target={target}\", VERBOSE * 2)\n dirs = files = 0\n for pattern in patterns:\n if pattern.endswith(\"/\"):\n log(f\"Looking for all files under '{pattern}'\", VERBOSE * 3)\n pattern += \"**/*\"\n elif \"*\" in pattern:\n log(f\"Looking for all files matching '{pattern}'\", VERBOSE * 3)\n else:\n log(f\"Looking for files named '{pattern}'\", VERBOSE * 3)\n file_per_pattern = 0\n for f in step.source_path.glob(pattern):\n relative = f.relative_to(step.source_path)\n if target:\n dest = step.build_path / target / relative\n else:\n dest = step.build_path / relative\n\n dest_parent = dest.parent\n if not dest_parent.is_dir():\n log(f\"Mkdir {dest_parent}\", VERBOSE)\n dest_parent.mkdir(parents=True)\n dirs += 1\n if f.is_file():\n log(f\"Copy {f}\", VERBOSE)\n copy2(f, dest)\n files += 1\n file_per_pattern += 1\n log(f\"Copied {file_per_pattern} files matching '{pattern}'\", VERBOSE * 2)\n # TODO: Expand capabilities to capture files/dirs per pattern, helpful to get lookup counts\n log(f\"Completed copying {len(patterns)} patterns. Created {files} files in {dirs} directories\")", "def match_files(patterns, files):\n\tall_files = files if isinstance(files, collections.Container) else list(files)\n\treturn_files = set()\n\tfor pattern in patterns:\n\t\tif pattern.include is not None:\n\t\t\tresult_files = pattern.match(all_files)\n\t\t\tif pattern.include:\n\t\t\t\treturn_files.update(result_files)\n\t\t\telse:\n\t\t\t\treturn_files.difference_update(result_files)\n\treturn return_files", "def get_target_files(self, src_dir, src_pattern):\n return File().get_target_files(src_dir, src_pattern)", "def get_data_files(source_dest_pairs):\n data_files = []\n for src_dir, dest_dir in source_dest_pairs:\n for src_root, _, files in os.walk(src_dir):\n dest_root = src_root.replace(src_dir, dest_dir, 1)\n dir_files = []\n for file_ in files:\n dir_files.append(os.path.join(src_root, file_))\n data_files.append((dest_root, dir_files))\n return data_files", "def glob_paths(self, name, source, pattern, test_data=()):\n assert isinstance(source, config_types.Path)\n result = self._run(\n name, ['glob', source, pattern],\n lambda: self.test_api.glob_paths(test_data),\n self.m.raw_io.output_text())\n ret = [source.join(*x.split(self.m.path.sep))\n for x in result.stdout.splitlines()]\n result.presentation.logs[\"glob\"] = map(str, ret)\n return ret", "def _DataSourceFromFilePattern(self,\n file_pattern,\n input_source_weights=None,\n **extra_input_kwargs):\n del input_source_weights # Unused.\n\n def Process(source_id, record):\n del source_id # Unused.\n [num] = tf.py_func(int, [record], [tf.int64])\n return py_utils.NestedMap(data=num), 1\n\n # Samples random records from the data files and processes them\n # to generate batches.\n inputs, _ = generic_input.GenericInput(\n processor=Process,\n file_pattern=file_pattern,\n file_random_seed=123,\n file_buffer_size=1,\n file_parallelism=1,\n bucket_batch_limit=[1],\n bucket_upper_bound=[1])\n return inputs", "def data_files(self, pattern=None, regex=None):\n 
return self._files_in_subdir(self.data_dir, pattern, regex)", "def search(self, src, exclude_pattern = [\"**/*.pyc\"], include_pattern = [\"**/*.py\"]):\n src = os.path.abspath(src)\n \n _target = Path(src)\n _target._flavour.casefold = lambda x : x # basic windows path don't distinguish upper / lower case.\n allfiles = list(_target.glob(\"**/*\"))\n \n exclude = list()\n for _ex in exclude_pattern:\n exclude += _target.glob(_ex) \n \n include = list()\n for _in in include_pattern:\n include += _target.glob(_in) \n \n _target_path = set(allfiles) - set(exclude) | set(include)\n \n _target_dir_path = sorted(list(x for x in _target_path if x.is_dir() is True))\n _target_file_path = sorted(list(x for x in _target_path if x.is_file() is True))\n \n return _target_dir_path, _target_file_path", "def find_data_files_distutils(self, package, src_dir):\n from glob import glob\n import os\n from distutils.util import convert_path\n\n globs = (self.package_data.get('', [])\n + self.package_data.get(package, []))\n files = []\n for pattern in globs:\n # Each pattern has to be converted to a platform-specific path\n filelist = glob(os.path.join(src_dir, convert_path(pattern)))\n # Files that match more than one pattern are only added once\n files.extend([fn for fn in filelist if fn not in files\n and (os.path.isfile(fn) or os.path.islink(fn))])\n return files", "def glob(patterns: list[str]) -> Table:\n for val in _ensure_list(patterns):\n fol, _, pat = val.partition(\"/*\")\n folder = Path(fol)\n for file in folder.glob(\"*\" + pat):\n yield {\"file\": str(file)}", "def _find_data_files(pattern: str) -> List[str]:\n file_list = glob.glob(pattern)\n if not file_list:\n raise ValueError('No files found matching: ' + str(pattern))\n sorted_file_list = sorted(file_list, key=numerical_sort)\n return sorted_file_list", "def input_fn(self,\n file_pattern: List[Text]):\n root_paths = [x.replace(\"*\", \"\") for x in file_pattern]\n\n file_paths = []\n for root in root_paths:\n file_paths.extend(path_utils.list_dir(root))\n\n dataset = tf.data.TFRecordDataset(file_paths,\n compression_type='GZIP')\n df = convert_raw_dataset_to_pandas(dataset,\n self.schema,\n 100000)\n\n # Separate labels\n X = df[[x for x in df.columns if\n naming_utils.check_if_transformed_feature(x)]]\n y = df[[x for x in df.columns if\n naming_utils.check_if_transformed_label(x)]]\n return X, y", "def copyFiles(sourceDir, destinationDir, patterns):\n\tfrom glob import glob\n\tfrom os.path import join, abspath, exists, isfile\n\timport shutil\n\tsourceDir = adaptPath(sourceDir)\n\tdestinationDir = adaptPath(destinationDir)\n\t\n\tif exists(abspath(sourceDir)) == False:\n\t\tprint ('! \"%s\" directory not existing'%sourceDir)\n\tmakedir(destinationDir)\n\tfor pattern in patterns:\n\t\tsrcPath = join(sourceDir,pattern)\n\t\tfor filename in glob(srcPath):\n\t\t\tif isfile(filename):\n\t\t\t\ttry:\n\t\t\t\t\tshutil.copy2(filename, destinationDir)\n\t\t\t\texcept IOError:\n\t\t\t\t\tprint (\"! 
Failed copy '%s' -> '%s'\" %(filename, destinationDir))", "def find_data_files(pattern: str) -> List[str]:\n file_list = glob.glob(pattern)\n if not file_list:\n raise ValueError('No files found matching: ' + str(pattern))\n sorted_file_list = sorted(file_list, key=numerical_sort)\n return sorted_file_list", "def build_targets(self, patterns):\n _targets = []\n for p in patterns:\n p = p.format_map(self.config)\n for s in self.samples:\n e = dict(s, **self.config)\n _targets.append(p.format_map(e))\n return list(set(_targets))", "def _resolvePathPatterns(self, sources, source):\n kept = []\n pattern = re.compile(source['pathPattern'])\n basedir = self._basePath / source['path']\n if (self._basePath.name == Path(self._largeImagePath).name and\n (self._basePath.parent / source['path']).is_dir()):\n basedir = self._basePath.parent / source['path']\n basedir = basedir.resolve()\n for entry in basedir.iterdir():\n match = pattern.search(entry.name)\n if match:\n if entry.is_file():\n kept.append((entry.name, entry, match))\n elif entry.is_dir() and (entry / entry.name).is_file():\n kept.append((entry.name, entry / entry.name, match))\n for idx, (_, entry, match) in enumerate(sorted(kept)):\n subsource = copy.deepcopy(source)\n # Use named match groups to augment source values.\n for k, v in match.groupdict().items():\n if v.isdigit():\n v = int(v)\n if k.endswith('1'):\n v -= 1\n if '.' in k:\n subsource.setdefault(k.split('.', 1)[0], {})[k.split('.', 1)[1]] = v\n else:\n subsource[k] = v\n subsource['path'] = entry\n for axis in self._axesList:\n stepKey = '%sStep' % axis\n valuesKey = '%sValues' % axis\n if stepKey in source:\n if axis in source or valuesKey not in source:\n subsource[axis] = subsource.get(axis, 0) + idx * source[stepKey]\n else:\n subsource[valuesKey] = [\n val + idx * source[stepKey] for val in subsource[valuesKey]]\n del subsource['pathPattern']\n sources.append(subsource)", "def _load_all_data(file_patterns, columns=None):\n all_files = [f for pattern in file_patterns for f in glob.glob(pattern, recursive=True)]\n data = [pd.read_csv(d, usecols=columns, skipinitialspace=True) for d in all_files]\n df = pd.concat(data, ignore_index=True)\n return df", "def LocateFiles(pattern, root=os.curdir):\n for path, _, files in os.walk(os.path.abspath(root)):\n for filename in fnmatch.filter(files, pattern):\n yield os.path.join(path, filename)", "def update_sources_data(sources_data, **sources_params):\n\n source_data_filename = sources_params[\"sourcedatafilename\"]\n\n for source in sort_sources(\n recursive_glob(sources_params[\"datapath\"], source_data_filename)\n ):\n update_file = open(source, \"r\", encoding=\"UTF-8\")\n update_data = json.load(update_file)\n sources_data.append(update_data)\n update_file.close()\n\n for source in sources_params[\"extensions\"]:\n source_dir = path_join_robust(sources_params[\"extensionspath\"], source)\n for update_file_path in sort_sources(\n recursive_glob(source_dir, source_data_filename)\n ):\n update_file = open(update_file_path, \"r\")\n update_data = json.load(update_file)\n\n sources_data.append(update_data)\n update_file.close()\n\n return sources_data", "def _ExtractWithFilter(\n self, source_path_specs, destination_path, output_writer,\n filter_file_path, skip_duplicates=True):\n for source_path_spec in source_path_specs:\n file_system, mount_point = self._GetSourceFileSystem(\n source_path_spec, resolver_context=self._resolver_context)\n\n if self._knowledge_base is None:\n self._Preprocess(file_system, mount_point)\n\n 
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(\n source_path_spec)\n output_writer.Write(\n u'Extracting file entries from: {0:s}\\n'.format(display_name))\n\n environment_variables = self._knowledge_base.GetEnvironmentVariables()\n find_specs = frontend_utils.BuildFindSpecsFromFile(\n filter_file_path, environment_variables=environment_variables)\n\n searcher = file_system_searcher.FileSystemSearcher(\n file_system, mount_point)\n for path_spec in searcher.Find(find_specs=find_specs):\n self._ExtractFileEntry(\n path_spec, destination_path, output_writer,\n skip_duplicates=skip_duplicates)\n\n file_system.Close()", "def FindSources(env, dest, source, suffixes=None):\n for source_entry in env.Flatten(source):\n if type(source_entry) == str:\n # Search for matches for each source entry\n source_nodes = env.Glob(source_entry)\n else:\n # Source entry is already a file or directory node; no need to glob it\n source_nodes = [source_entry]\n for s in source_nodes:\n if str(s.__class__) == 'SCons.Node.FS.Dir':\n # Recursively search subdir. Since glob('*') doesn't match dot files,\n # also glob('.*').\n FindSources(env, dest, [s.abspath + '/*', s.abspath + '/.*'],\n suffixes)\n elif suffixes and s.suffix in suffixes:\n dest.add(s)", "def find_files(base_path,pattern):\n res=()\n print_verbose(2,\"\\t> Recursive search: Base path = %s, pattern = %s\" %(base_path,pattern))\n for root, dirs, files in os.walk(base_path, topdown=True):\n for f_name in fnmatch.filter(files, pattern):\n res= res + (os.path.join(root, f_name),)\n return res;", "def xcopy(namePatterns, sourceDir, targetDir, renameTo=None, flags=None):\n\tnameL = dirR.listFilesMatch(sourceDir, namePatterns, flags)\n\tif len(nameL) == 0: return\n\tif not os.path.exists(targetDir): makeDir(targetDir)\n\tif renameTo == None:\n\t\tfor name in nameL:\n\t\t\tfull_source_path = os.path.join(sourceDir, name)\n\t\t\tfull_target_path = os.path.join(targetDir, name)\n\t\t\tshutil.copy(full_source_path, full_target_path)\n\telse:\n\t\tfull_source_path = os.path.join(sourceDir, nameL[0])\n\t\tfull_target_path = os.path.join(targetDir, renameTo)\n\t\tshutil.copy(full_source_path, full_target_path)", "def edit_files(patterns, expressions, # pylint: disable=R0913, R0914\r\n start_dir=None, max_depth=1, dry_run=True,\r\n output=sys.stdout):\r\n # Makes for a better diagnostic because str are also iterable.\r\n assert not isinstance(patterns, str), \"patterns should be a list\"\r\n assert not isinstance(expressions, str), \"expressions should be a list\"\r\n\r\n # Shortcut: if there is only one pattern, make sure we process just that.\r\n if len(patterns) == 1 and not start_dir:\r\n pattern = patterns[0]\r\n directory = os.path.dirname(pattern)\r\n if directory:\r\n patterns = [os.path.basename(pattern)]\r\n start_dir = directory\r\n max_depth = 1\r\n\r\n processed_paths = []\r\n editor = Editor(dry_run=dry_run)\r\n if expressions:\r\n editor.set_code_expr(expressions)\r\n if not start_dir:\r\n start_dir = os.getcwd()\r\n for root, dirs, files in os.walk(start_dir): # pylint: disable=W0612\r\n if max_depth is not None:\r\n relpath = os.path.relpath(root, start=start_dir)\r\n depth = len(relpath.split(os.sep))\r\n if depth > max_depth:\r\n continue\r\n names = []\r\n for pattern in patterns:\r\n names += fnmatch.filter(files, pattern)\r\n for name in names:\r\n path = os.path.join(root, name)\r\n processed_paths.append(os.path.abspath(path))\r\n diffs = editor.edit_file(path)\r\n if dry_run:\r\n output.write(\"\".join(diffs))\r\n if output 
!= sys.stdout:\r\n output.close()\r\n return processed_paths", "def load_all(self, root_dir, file_list=None, pattern=None):\n\n # Select paths for training and evaluation\n if file_list is None:\n data_paths = glob.glob(os.path.join(root_dir, '*')) # list of all paths\n else:\n data_paths = [os.path.join(root_dir, p) for p in file_list]\n if len(data_paths) == 0:\n raise Exception('No files found using: {}'.format(os.path.join(root_dir, '*')))\n\n if pattern is None:\n # by default evaluate on\n selected_paths = data_paths\n else:\n selected_paths = list(filter(lambda x: re.search(pattern, x), data_paths))\n\n input_paths = [p for p in selected_paths if os.path.isfile(p) and p.endswith('.ts')]\n if len(input_paths) == 0:\n raise Exception(\"No .ts files found using pattern: '{}'\".format(pattern))\n\n all_df, labels_df = self.load_single(input_paths[0]) # a single file contains dataset\n\n return all_df, labels_df", "def pattern_matching(pattern_base, cc_pattern_base):\n papers = [os.path.join(target_folder, paper) for paper in os.listdir(target_folder) if \".xml\" in paper]\n \n for paper in papers:\n paper_text = open(paper[:paper.index('.')]+\".txt\", 'r').read()\n \n annotator = detect_change_events(paper, pattern_base, paper_text) \n annotator = detect_cause_correlation(paper_text, cc_pattern_base, annotator)\n \n # Write the annotations to file\n with open(paper[:paper.index('.')]+\".ann\", 'w') as annfile:\n for annotation in annotator.annotations:\n annfile.write(annotation+\"\\n\")", "def grep(pattern, *files_or_paths):\n matches = []\n\n for fop in files_or_paths:\n with fileobj(fop) as fo:\n matches.extend((line for line in fo if re.match(pattern, line)))\n\n return matches", "def find_data_files_setuptools(self, package, src_dir):\n from glob import glob\n import itertools\n import os\n\n patterns = self._get_platform_patterns(\n self.package_data,\n package,\n src_dir,\n )\n globs_expanded = map(glob, patterns)\n # flatten the expanded globs into an iterable of matches\n globs_matches = itertools.chain.from_iterable(globs_expanded)\n glob_files = [e for e in globs_matches if os.path.isfile(e) or os.path.islink(e)]\n files = itertools.chain(\n self.manifest_files.get(package, []),\n glob_files,\n )\n return self.exclude_data_files(package, src_dir, files)", "def rawvc_picard_merge_vcfs_targets_input(wildcards):\n return _rawvc_vcfs_targets_input(wildcards)" ]
[ "0.61576617", "0.6129363", "0.5919765", "0.5905183", "0.58921427", "0.5833857", "0.58126956", "0.5732098", "0.5684496", "0.5594679", "0.558987", "0.5580824", "0.5559924", "0.55368453", "0.5534052", "0.5506687", "0.53464913", "0.52918094", "0.5241423", "0.52367634", "0.5205368", "0.5200513", "0.5199385", "0.51857924", "0.5183654", "0.51768863", "0.51678073", "0.5153356", "0.5137763", "0.5115348" ]
0.7973076
0
Matches a template image in a target grayscale image
def match_template(img, template, threshold=0.9):
    #print(img)
    #print(template)
    res = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
    matches = np.where(res >= threshold)
    return matches
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matchTemplate(image, template):\n match_hmap = cvCreateImage(\n cvSize(image.width-template.width+1, image.height-template.height+1),\n IPL_DEPTH_32F,\n 1\n )\n cvMatchTemplate(image, template, match_hmap, CV_TM_SQDIFF_NORMED)\n return match_hmap", "def templateMatchSingle(img, template):\n\timg = grayscale(img)\n\ttemplate = grayscale(template)\n\tw, h = template.shape[::-1]\n\tres = cv2.matchTemplate(img, template, cv2.TM_CCOEFF)\n\tmin_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n\ttop_left = max_loc\n\tbottom_right = (top_left[0] + w, top_left[1] + h)\n\treturn top_left, bottom_right", "def try_template_matching(image,template):\n img2 = image.copy()\n w, h = template.shape[::-1]\n # All the 6 methods for comparison in a list\n methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',\n 'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']\n for meth in methods:\n img = img2.copy()\n method = eval(meth)\n # Apply template Matching\n res = cv2.matchTemplate(img,template,method)\n res-=np.min(res)\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n threshold=0.9*np.max(res)\n \n # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum\n if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:\n loc = np.where( res <=0.1*np.max(res))\n else:\n loc = np.where( res >= threshold)\n \n for pt in zip(*loc[::-1]):\n cv2.rectangle(img, pt, (pt[0] + w, pt[1] + h), 255, 2)\n plt.figure()\n plt.subplot(121),plt.imshow(image,cmap = 'gray')\n plt.title('Matching Result'), plt.xticks([]), plt.yticks([])\n plt.subplot(122),plt.imshow(img,cmap = 'gray')\n plt.title('Detected Point'), plt.xticks([]), plt.yticks([])\n plt.suptitle(meth)\n plt.show()", "def hist_match_grey(source, template, to_int=True):\n\n oldshape = source.shape\n source = source.ravel()\n template = template.ravel()\n\n # get the set of unique pixel values and their corresponding indices and\n # counts\n s_values, bin_idx, s_counts = np.unique(source, return_inverse=True,\n return_counts=True)\n t_values, t_counts = np.unique(template, return_counts=True)\n\n # take the cumsum of the counts and normalize by the number of pixels to\n # get the empirical cumulative distribution functions for the source and\n # template images (maps pixel value --> quantile)\n s_quantiles = np.cumsum(s_counts).astype(np.float64)\n s_quantiles /= s_quantiles[-1]\n t_quantiles = np.cumsum(t_counts).astype(np.float64)\n t_quantiles /= t_quantiles[-1]\n\n # interpolate linearly to find the pixel values in the template image\n # that correspond most closely to the quantiles in the source image\n interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)\n output = interp_t_values[bin_idx].reshape(oldshape)\n\n if to_int:\n output = output.astype(np.uint8)\n\n return output", "def templateMatchMulti(img, template):\n\tgray = grayscale(img)\n\ttemp = grayscale(template)\n\tw, h = temp.shape[::-1]\n\tres = cv2.matchTemplate(gray, temp, cv2.TM_CCOEFF_NORMED)\n\tthreshold = 0.8\n\tloc = np.where(res >= threshold)\n\tpts = []\n\tfor pt in zip(*loc[::-1]):\n\t\trect = [pt, (pt[0] + w, pt[1] + h)]\n\t\tpts.append(rect)\n\treturn pts", "def detect(img, template):\r\n\r\n #detect threshold\r\n args = parse_args()\r\n threshold=dictornary(args)\r\n\r\n # detect edges of image\r\n \"\"\"prewitt_x = [[1, 0, -1]] * 3\r\n prewitt_y = [[1] * 3, [0] * 3, [-1] * 3]\r\n img_x = task1.detect_edges(img, prewitt_x, False)\r\n img_y = task1.detect_edges(img, prewitt_y, False)\r\n img_norm = task1.edge_magnitude(img_x, img_y)\r\n\r\n 
task1.write_image(task1.normalize(img_norm), \".//img_norm.jpg\")\r\n\r\n # detect edges in template\r\n\r\n temp_x = task1.detect_edges(template, prewitt_x, False)\r\n temp_y = task1.detect_edges(template, prewitt_y, False)\r\n template_norm = task1.edge_magnitude(temp_x, temp_y)\r\n\r\n task1.write_image(task1.normalize(template_norm), \".//template_norm.jpg\") \"\"\"\r\n\r\n img_norm = task1.normalize(img)\r\n template_norm = task1.normalize(template)\r\n\r\n coordinates = []\r\n temp_h = len(template_norm)\r\n temp_w = len(template_norm[0])\r\n\r\n rows = len(img_norm)\r\n cols = len(img_norm[0])\r\n\r\n output = [[0 for x in range(len(img_norm[0]))] for y in range(len(img_norm))]\r\n cropped_img = [[0 for x in range(temp_w)] for y in range(temp_h)]\r\n\r\n for i in range(rows):\r\n for j in range(cols):\r\n\r\n if ((i +temp_h) < rows and (j + temp_w < cols)):\r\n cropped_img = utils.crop(img_norm, i, i + temp_h, j, j + temp_w)\r\n\r\n\r\n img_mul_temp = utils.elementwise_mul(cropped_img, template_norm)\r\n sum = 0\r\n # sum of every elemnet in img_mul_temp\r\n for p in range(temp_h):\r\n for q in range(temp_w):\r\n sum += img_mul_temp[p][q]\r\n\r\n # squaring every element in denominator of image\r\n square_img = utils.elementwise_mul(cropped_img, cropped_img)\r\n numsum_img = 0\r\n for d in range(len(cropped_img)):\r\n for e in range(len(cropped_img[0])):\r\n numsum_img += square_img[d][e]\r\n\r\n # squaring every element in denominator of template\r\n square_temp = utils.elementwise_mul(template_norm, template_norm)\r\n numsum_temp = 0\r\n for k in range(temp_h):\r\n for l in range(temp_w):\r\n numsum_temp += square_temp[k][l]\r\n\r\n denominator = np.sqrt((numsum_img * numsum_temp))\r\n\r\n if (denominator != 0):\r\n output[i][j] = (sum / denominator)\r\n if (output[i][j] > threshold):\r\n coordinates.append([i, j])\r\n\r\n # TODO: implement this function.\r\n # raise NotImplementedError\r\n return coordinates", "def locate_template(template, img):\n temp_found = None\n (height, width) = template.shape[:2]\n\n for scale in np.linspace(0.1, 3, 10)[::-1]:\n # resize the image and store the ratio\n resized_img = imutils.resize(img, width=int(img.shape[1] * scale))\n ratio = img.shape[1] / float(resized_img.shape[1])\n if resized_img.shape[0] < height or resized_img.shape[1] < width:\n break\n # Convert to edged image for checking\n e = cv2.Canny(resized_img, 10, 25)\n match = cv2.matchTemplate(e, template, cv2.TM_CCOEFF)\n (_, val_max, _, loc_max) = cv2.minMaxLoc(match)\n if temp_found is None or val_max > temp_found[0]:\n temp_found = (val_max, loc_max, ratio)\n return temp_found", "def _find_using_template(image_to_find, image, threshold=None, **kwargs):\n threshold = 1e-6 if threshold is None else threshold\n result = cv2.matchTemplate(image, image_to_find, cv2.TM_SQDIFF_NORMED)\n idx = np.argmin(result)\n metric = np.ravel(result)[idx]\n x0, y0 = np.unravel_index(idx, result.shape)[-1::-1]\n if metric > threshold:\n raise FindError(metric, (x0, y0))\n x, y = image_to_find.shape[1::-1]\n target = Target(image_to_find, [[0, 0], [x, 0], [x, y], [0, y]], None, None, None)\n x1 = x0 + image_to_find.shape[1]\n y1 = y0 + image_to_find.shape[0]\n quad = [[x0, y0], [x1, y0], [x1, y1], [x0, y1]]\n H = np.array([[0., 0., x0], [0., 0., y0], [0., 0., 1.0]])\n return TrackedTarget(target, image, [(0, 0)], [(x0, y0)], H, quad)", "def template_match_t(self, target, minrad=minrad_, maxrad=maxrad_,\n longlat_thresh2=longlat_thresh2_, rad_thresh=rad_thresh_,\n template_thresh=template_thresh_,\n 
target_thresh=target_thresh_, rw=rw_):\n\n # thickness of rings for template match\n #commented out because this is passed now\n #rw = 8 #default 2 from DeepMoon project, we use 8 or 4\n\n # threshold target\n target[target >= target_thresh] = 1\n target[target < target_thresh] = 0\n\n radii = np.arange(minrad, maxrad + 1, 1, dtype=int)\n coords = [] # coordinates extracted from template matching\n corr = [] # correlation coefficient for coordinates set\n for r in radii:\n # template\n n = 2 * (r + rw + 1)\n template = np.zeros((n, n))\n cv2.circle(template, (r + rw + 1, r + rw + 1), r, 1, rw)\n\n # template match - result is nxn array of probabilities\n result = match_template(target, template, pad_input=True)\n index_r = np.where(result > template_thresh)\n coords_r = np.asarray(list(zip(*index_r)))\n corr_r = np.asarray(result[index_r])\n\n # store x,y,r\n if len(coords_r) > 0:\n for c in coords_r:\n coords.append([c[1], c[0], r])\n for l in corr_r:\n corr.append(np.abs(l))\n\n # remove duplicates from template matching at neighboring radii/locations\n coords, corr = np.asarray(coords), np.asarray(corr)\n i, N = 0, len(coords)\n while i < N:\n Long, Lat, Rad = coords.T\n lo, la, r = coords[i]\n minr = np.minimum(r, Rad)\n\n dL = ((Long - lo)**2 + (Lat - la)**2) / minr**2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n if len(np.where(index == True)[0]) > 1:\n # replace current coord with max match probability coord in\n # duplicate list\n coords_i = coords[np.where(index == True)]\n corr_i = corr[np.where(index == True)]\n coords[i] = coords_i[corr_i == np.max(corr_i)][0]\n index[i] = False\n coords = coords[np.where(index == False)]\n N, i = len(coords), i + 1\n\n return coords", "def match_template(image, template, mask=None, method=cv2.TM_CCORR_NORMED):\n mt_out = cv2.matchTemplate(image, template, method, mask=mask)\n th, tw = template.shape\n return cv2.copyMakeBorder(mt_out, th//2, th - th//2 - 1, tw//2, tw - tw//2 - 1, cv2.BORDER_CONSTANT)", "def match_templates(image, templates, overlap=0.15):\n default_threshold = 80\n gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n rectangles = np.empty([0, 2, 2], dtype=int)\n for template in templates:\n threshold = template.get('threshold', default_threshold)\n if threshold > 100:\n threshold = 100\n elif threshold < 0:\n threshold = 0\n threshold /= 100.0\n template_image = template.get('image')\n template_flip = template.get('flip')\n template_mask = template.get('mask')\n template_method = template.get('method', 'canny') # defaults to canny\n gray_template = cv2.cvtColor(template_image, cv2.COLOR_BGR2GRAY)\n transformations = [lambda im: im]\n if template_flip:\n if template_flip[0] in ('h', 'a'):\n transformations.append(lambda im: cv2.flip(im, 1))\n if template_flip[0] in ('v', 'a'):\n transformations.append(lambda im: cv2.flip(im, 0))\n if template_flip[0] in ('b', 'a'):\n transformations.append(lambda im: cv2.flip(cv2.flip(im, 1), 0))\n for transformation in transformations:\n transformed_template = transformation(gray_template)\n height, width = transformed_template.shape\n if template_mask is not None:\n transformed_mask = transformation(template_mask)\n else:\n transformed_mask = None\n results = match_template_mask(gray_image, transformed_template,\n transformed_mask, template_method)\n index = results >= threshold\n y1, x1 = np.where(index)\n y2, x2 = y1 + height, x1 + width\n coords = np.array([x1, y1, x2, y2], dtype=int).T\n probs = results[index]\n boxes = np.array(\n 
object_detection.non_max_suppression(coords, probs, overlap)\n )\n xyboxes = boxes.reshape(boxes.shape[0], 2, 2) # list of x,y points\n rectangles = np.vstack([rectangles, xyboxes])\n return rectangles.astype(int)", "def hist_match(self, source, template):\n\n oldshape = source.shape\n # contiguous flattened array\n # https://docs.scipy.org/doc/numpy/reference/generated/numpy.ravel.html\n source = source.ravel()\n template = template.ravel()\n \n # get the set of unique pixel values and their corresponding indices and\n # counts\n # https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html\n s_values, bin_idx, s_counts = np.unique(source, return_inverse=True,\n return_counts=True)\n t_values, t_counts = np.unique(template, return_counts=True)\n \n # take the cumsum of the counts and normalize by the number of pixels to\n # get the empirical cumulative distribution functions for the source and\n # template images (maps pixel value --> quantile)\n # https://docs.scipy.org/doc/numpy/reference/generated/numpy.cumsum.html?highlight=sum\n s_quantiles = np.cumsum(s_counts).astype(np.float64)\n s_quantiles /= s_quantiles[-1]\n t_quantiles = np.cumsum(t_counts).astype(np.float64)\n t_quantiles /= t_quantiles[-1]\n \n # interpolate linearly to find the pixel values in the template image\n # that correspond most closely to the quantiles in the source image\n interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)\n \n if self.RootOutput:\n suffix=str(self.layer)\n prefix=self.RootPrefix\n nBin = 500\n cumsumOrig = ROOT.TH1F(prefix+\"cumsumOrig\"+suffix,prefix+\"cumsumOrig\"+suffix,nBin,s_values.min(),s_values.max())\n cumsumTemplate = ROOT.TH1F(prefix+\"cumsumTemplate\"+suffix,prefix+\"cumsumTemplate\"+suffix,nBin,t_values.min(),t_values.max())\n cumsumInterp = ROOT.TH1F(prefix+\"cumsumInterp\"+suffix,prefix+\"cumsumInterp\"+suffix,nBin,interp_t_values.min(),interp_t_values.max())\n for s_value in s_values:\n cumsumOrig.Fill(s_value)\n for t_value in t_values:\n cumsumTemplate.Fill(s_value)\n for interp_t_value in interp_t_values:\n cumsumInterp.Fill(interp_t_value)\n \n self.allHistos.append(cumsumTemplate) \n self.allHistos.append(cumsumOrig) \n self.allHistos.append(cumsumInterp)\n\n return interp_t_values[bin_idx].reshape(oldshape)", "def hist_match(source, template):\n\n oldshape = source.shape\n source = source.ravel()\n template = template.ravel()\n\n # get the set of unique pixel values and their corresponding indices and\n # counts\n s_values, bin_idx, s_counts = np.unique(\n source, return_inverse=True, return_counts=True\n )\n t_values, t_counts = np.unique(template, return_counts=True)\n\n # take the cumsum of the counts and normalize by the number of pixels to\n # get the empirical cumulative distribution functions for the source and\n # template images (maps pixel value --> quantile)\n s_quantiles = np.cumsum(s_counts).astype(np.float64)\n s_quantiles /= s_quantiles[-1]\n t_quantiles = np.cumsum(t_counts).astype(np.float64)\n t_quantiles /= t_quantiles[-1]\n\n # interpolate linearly to find the pixel values in the template image\n # that correspond most closely to the quantiles in the source image\n interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)\n\n return interp_t_values[bin_idx].reshape(oldshape)", "def hist_match(source, template):\n\n oldshape = source.shape\n source = source.ravel()\n template = template.ravel()\n\n # get the set of unique pixel values and their corresponding indices and\n # counts\n s_values, bin_idx, s_counts = 
np.unique(source, return_inverse=True,\n return_counts=True)\n t_values, t_counts = np.unique(template, return_counts=True)\n\n # take the cumsum of the counts and normalize by the number of pixels to\n # get the empirical cumulative distribution functions for the source and\n # template images (maps pixel value --> quantile)\n s_quantiles = np.cumsum(s_counts).astype(np.float64)\n s_quantiles /= s_quantiles[-1]\n t_quantiles = np.cumsum(t_counts).astype(np.float64)\n t_quantiles /= t_quantiles[-1]\n\n # interpolate linearly to find the pixel values in the template image\n # that correspond most closely to the quantiles in the source image\n interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)\n\n return interp_t_values[bin_idx].reshape(oldshape)", "def hist_match(source: np.ndarray, template: np.ndarray) -> np.ndarray:\n positive = source > 0\n result = np.zeros_like(source)\n source = source[positive].ravel()\n template = template[template > 0].ravel()\n # get the set of unique pixel values and their corresponding indices and\n # counts\n source_values, bin_idx, source_counts = np.unique(source,\n return_inverse=True,\n return_counts=True)\n template_values, template_counts = np.unique(template,\n return_counts=True)\n # take the cumsum of the counts and normalize by the number of pixels to\n # get the empirical cumulative distribution functions for the source and\n # template images (maps pixel value --> quantile)\n source_quantiles = np.cumsum(source_counts).astype(np.float64)\n source_quantiles /= source_quantiles[-1]\n template_quantiles = np.cumsum(template_counts).astype(np.float64)\n template_quantiles /= template_quantiles[-1]\n\n # interpolate linearly to find the pixel values in the template image\n # that correspond most closely to the quantiles in the source image\n interp_t_values = np.interp(source_quantiles, template_quantiles,\n template_values)\n result[positive] = interp_t_values[bin_idx]\n return result", "def image_search_in_image(base_image, looking_for_img):\n base_image = cv2.imread(base_image)\n looking_for_img = cv2.imread(looking_for_img)\n # result = cv2.matchTemplate(base_image, looking_for_img, cv2.TM_SQDIFF_NORMED)\n result = cv2.matchTemplate(base_image, looking_for_img, cv2.TM_CCOEFF)\n (_, _, minLoc, maxLoc) = cv2.minMaxLoc(result)\n print(result)\n (waldoHeight, waldoWidth) = looking_for_img.shape[:2]\n topLeft = maxLoc\n botRight = (topLeft[0] + waldoWidth, topLeft[1] + waldoHeight)\n roi = base_image[topLeft[1]:botRight[1], topLeft[0]:botRight[0]]\n mask = np.zeros(base_image.shape, dtype=\"uint8\")\n puzzle = cv2.addWeighted(base_image, 0.25, mask, 0.75, 0)\n puzzle[topLeft[1]:botRight[1], topLeft[0]:botRight[0]] = roi\n cv2.imshow(\"Puzzle\", puzzle)\n cv2.imshow(\"Waldo\", looking_for_img)\n cv2.waitKey(0)", "def drawMatchMulti(img, template, color = (0,0,255), thickness = 2):\n\ttmp = img.copy()\n\tgray = grayscale(img)\n\ttemp = grayscale(template)\n\tw, h = temp.shape[::-1]\n\tres = cv2.matchTemplate(gray, temp, cv2.TM_CCOEFF_NORMED)\n\tthreshold = 0.8\n\tloc = np.where(res >= threshold)\n\tfor pt in zip(*loc[::-1]):\n\t\tcv2.rectangle(tmp, pt, (pt[0] + w, pt[1] + h), color, thickness)\n\treturn tmp", "def single_image_band_match(tif):\n tif_raster=gdal_array.LoadFile(tif)\n \n file_path=tif[:-4]+\"matched.tif\"\n \n R=tif_raster[0]\n G=tif_raster[1]\n B=tif_raster[2]\n \n print(R.shape)\n result=ird.similarity(G,R , numiter=1, order=1)\n R= ird.transform_img(R, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], 
mode='constant', bgval=0, order=1)\n # print(result)\n print(R.shape)\n \n print(B.shape)\n result=ird.similarity(G,B , numiter=1, order=1)\n B= ird.transform_img(B, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=1)\n # print(result)\n print(B.shape)\n driver=osgeo.gdal.GetDriverByName(\"GTiff\")\n options = ['PHOTOMETRIC=RGB', 'PROFILE=GeoTIFF']\n \n dataset=driver.Create(file_path, R.shape[1],R.shape[0], 3, osgeo.gdal.GDT_UInt16, options) \n dataset.GetRasterBand(1).WriteArray(R)\n dataset.GetRasterBand(2).WriteArray(G)\n dataset.GetRasterBand(3).WriteArray(B)", "def check_image(self, image, temps):\n self.logger.debug('Check image \"%s\"', image)\n _, edges = cv2.threshold(cv2.imread(image, 0), 127, 255, cv2.THRESH_BINARY)\n\n result = []\n for filename in temps:\n template = cv2.imread(filename, 0)\n width, hight = template.shape[::-1]\n\n res = cv2.matchTemplate(edges, template, cv2.TM_CCORR_NORMED)\n if self.multi:\n for point in zip(*np.where(res >= self.threshold)[::-1]):\n result.append((point, (point[0] + width, point[1] + hight)))\n else:\n _, max_val, _, max_loc = cv2.minMaxLoc(res)\n if max_val > self.threshold:\n result.append((max_loc, (max_loc[0] + width, max_loc[1] + hight)))\n return result", "def match(image1,image2,threshold,useRansac=False,t_orientation=30,t_scale=0.5):\r\n im1, keypoints1, descriptors1 = ReadKeys(image1)\r\n im2, keypoints2, descriptors2 = ReadKeys(image2)\r\n #\r\n # REPLACE THIS CODE WITH YOUR SOLUTION (ASSIGNMENT 5, QUESTION 3)\r\n #\r\n # Generate five random matches (for testing purposes)\r\n # matched_pairs = []\r\n # num = 5\r\n # for i in range(num):\r\n # matched_pairs.append([keypoints1[i],keypoints2[i]])\r\n # return DisplayMatches(im1, im2, matched_pairs)\r\n\r\n # END OF SECTION OF CODE TO REPLACE\r\n #\r\n\r\n #q3\r\n matched_pairs = []\r\n between_angles = np.arccos(np.dot(descriptors1, np.transpose(descriptors2)))\r\n for i, row in enumerate(between_angles):\r\n \tratio = sorted(row)[0] / sorted(row)[1]\r\n \tif ratio <= threshold:\r\n\t \tmatched_pairs.append([keypoints1[i], keypoints2[np.where(row == sorted(row)[0])[0][0]]])\r\n # print(matched_pairs)\r\n if useRansac is False:\r\n return DisplayMatches(im1, im2, matched_pairs)\r\n\t# \r\n\r\n #q4\r\n repetition = 10\r\n subsets = [[]] * repetition\r\n for i in range(repetition):\r\n r = random.randint(0, len(matched_pairs))\r\n for match in matched_pairs:\r\n ds1, ds2 = matched_pairs[r][1][2]/matched_pairs[r][0][2], match[1][2]/match[0][2]\r\n do1, do2 = (matched_pairs[r][1][3]-matched_pairs[r][0][3]), (match[1][3]-match[0][3])\r\n if abs(ds2 - ds1) <= t_scale * ds1 and abs(do2 - do1) % (2 * math.pi) <= t_orientation:\r\n subsets[i].append(match)\r\n\r\n max_i, max_len = 0, subsets[0]\r\n for i in range(10):\r\n l = len(subsets[i])\r\n if l > max_len:\r\n max_len = l\r\n max_i = i\r\n\r\n im3 = DisplayMatches(im1, im2, subsets[max_i])\r\n return im3", "def drawMatch(img, template, color=(255,255,0), thickness=2):\n\ttmp = img.copy()\n\ttl, br = templateMatchSingle(tmp, template)\n\tcv2.rectangle(tmp, tl, br, color, thickness)\n\treturn tmp", "def extract_templates(im, interactive = False):\n\n im = np.flipud(im)\n# tmp = cv2.medianBlur(im, 5)\n# tmp = cv2.threshold(tmp, 255*0.65, 255, cv2.THRESH_BINARY)[1]\n\n im_filtered = filter_specgram(im, interactive)\n _, contours, _ = cv2.findContours(\n im_filtered,\n cv2.RETR_LIST,\n cv2.CHAIN_APPROX_SIMPLE\n )\n\n templates = []\n\n im_dbg_template_rejected = None\n 
im_dbg_template_overlay = None\n if interactive:\n im_dbg_template_rejected = im.copy()\n im_dbg_template_overlay = im.copy()\n\n #im_dbg_template_overlay *= 255/im_dbg_template_overlay.max()\n\n\n # apply trunc threshold\n # apply gaussian blur\n # apply binary threshold\n # remove small blobs\n # remove huge blobs\n # for each blob, check surrounding blobs within given radius and add \n # (how to choose which to add? what radius?\n smallest = -1\n average_val = np.average(im)\n print 'average: {}'.format(average_val)\n\n for i in xrange(len(contours)):\n r = cv2.boundingRect(contours[i])\n\n left = max(0, r[0] - 10)\n top = max(0, r[1] - 10)\n right = min(len(im[0]), r[0] + r[2] + 10)\n bottom = min(len(im), r[1] + r[3] + 10)\n\n area = r[2] * r[3]\n\n #TODO: use average values from sgram?\n if area < 50 or area > 10000: # : continue\n #if area > 10000:\n if not interactive: continue\n# cv2.putText(im_dbg_template_rejected, '{}'.format(area),\n# (left, top), cv2.FONT_HERSHEY_PLAIN, 1.0,\n# int(np.max(im_dbg_template_rejected)))\n cv2.rectangle(im_dbg_template_rejected, (left,top), (right,bottom), int(np.max(im_dbg_template_rejected)), 1)\n continue\n\n if smallest == -1 or area < smallest: smallest = area\n\n x = im[top:bottom, left:right]\n #x = im[r[1]:r[1]+r[3], r[0]:r[0]+r[2]]\n if np.min(x) >= average_val:\n if not interactive: continue\n cv2.putText(im_dbg_template_rejected, 'v:{}'.format(np.average(x)), (left, top), cv2.FONT_HERSHEY_PLAIN, 1.0, int(np.max(im_dbg_template_rejected)))\n cv2.rectangle(im_dbg_template_rejected, (left,top), (right,bottom), int(np.max(im_dbg_template_rejected)), 1)\n continue\n x = cv2.GaussianBlur(x, (0,0), 1.5)\n templates.append(x)\n\n if interactive:\n cv2.rectangle(im_dbg_template_overlay, (left, top), (right, bottom), int(np.max(im_dbg_template_overlay)), 1)\n #cv2.rectangle(im_dbg_template_overlay, (r[0]-10, r[1]-10), (r[0]+r[2]+10, r[1]+r[3]+10), (255,0,0), 1)\n if interactive:\n plotMultiple([im_dbg_template_overlay, im_dbg_template_rejected],\n #plotMultiple([im_filtered, im_dbg_template_rejected],\n None,\n ['templates', 'rejected'])\n\n\n# cv2.namedWindow('orig')\n# cv2.imshow('orig', im_dbg_template_overlay)\n# cv2.namedWindow('rejected')\n# cv2.imshow('rejected', im_dbg_template_rejected)\n # plt.imshow(im_dbg_template_overlay, aspect='auto')\n # plt.show()\n print 'smallest: {}'.format(smallest)\n plt_(im_dbg_template_rejected,'reject')\n plt_(im_dbg_template_overlay,'accept')\n# while cv2.waitKey(0) != ord('n'):\n# pass\n\n return templates", "def test_make_mask_w_ref_image(self):\n output_mask = instance_mask(\n os.path.join(data_dir, 'geotiff_labels.geojson'),\n reference_im=os.path.join(data_dir, 'sample_geotiff.tif'),\n do_transform=True,\n out_file=os.path.join(data_dir, 'test_out.tif')\n )\n truth_mask = skimage.io.imread(os.path.join(data_dir,\n 'sample_inst_mask.tif'))\n saved_output_mask = skimage.io.imread(os.path.join(data_dir,\n 'test_out.tif'))\n\n assert np.array_equal(saved_output_mask, truth_mask)\n # clean up\n os.remove(os.path.join(data_dir, 'test_out.tif'))\n assert np.array_equal(output_mask, truth_mask)", "def process(self, img):\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # for every known sign check if any exist\n for sign, sign_img in self.signs.items():\n\n # convert to gray\n sign_img = cv2.cvtColor(sign_img, cv2.COLOR_BGR2GRAY)\n\n # match templates\n res = cv2.matchTemplate(img_gray, sign_img, cv2.TM_CCOEFF_NORMED)\n\n # check if template (sign) matched\n threshold = 0.8\n flag = False\n if 
np.amax(res) > threshold:\n flag = True\n\n # save which sign is matched\n if flag:\n self.detected = sign\n print(f'template found: {sign}')\n\n # find the location\n w, h = sign_img.shape[:: -1]\n loc = np.where(res >= threshold)\n for pt in zip(*loc[:: -1]):\n cropped_sign = img[pt[1]: pt[1] + int(h/2), pt[0]: pt[0] + int(w/2)]\n\n # if it is a traffic light\n if sign == \"traffic_light\":\n # apply color detection\n color = self.color_detection(cropped_sign)\n\n if color == \"red\":\n self.command = \"stop\"\n elif color == \"green\":\n self.command = \"go\"\n elif color == \"yellow\":\n self.command = \"go\"\n\n return img", "def _compute_prediction(self, k, img, kp, des):\n\n # find corresponding points in the input image and the template image\n #put keypoints from template image in template_pts\n #put corresponding keypoints from input image in img_pts\n good = []\n self.matcher = cv2.BFMatcher() #cv2's \"brute force\" matcher\n matches = self.matcher.knnMatch(self.descs[k],des,k=2)\n for m,n in matches:\n if m.distance < self.good_thresh*n.distance: #if first best keypoint is closer to the template than .7 * second best, it's good\n good.append(m)\n if len(good) > self.min_match_count:\n img_pts = np.float32([ kp[m.trainIdx].pt for m in good ]).reshape(-1,1,2)\n template_pts = np.float32([ self.kps[k][m.queryIdx].pt for m in good ]).reshape(-1,1,2)\n else:\n return None\n\n # Transform input image so that it matches the template image as well as possible\n M, mask = cv2.findHomography(img_pts, template_pts, cv2.RANSAC, self.ransac_thresh)\n img_T = cv2.warpPerspective(img, M, self.signs[k].shape[::-1])\n visual_diff = compare_images(img_T, self.signs[k])\n return visual_diff", "def create_non_correspondences(uv_b_matches, img_b_shape, num_non_matches_per_match=100, img_b_mask=None):\n image_width = img_b_shape[1]\n image_height = img_b_shape[0]\n # print(\"uv_b_matches: \", uv_b_matches)\n if uv_b_matches == None:\n return None\n\n num_matches = len(uv_b_matches[0])\n\n def get_random_uv_b_non_matches():\n return pytorch_rand_select_pixel(width=image_width,height=image_height, \n num_samples=num_matches*num_non_matches_per_match)\n\n if img_b_mask is not None:\n img_b_mask_flat = img_b_mask.view(-1,1).squeeze(1)\n mask_b_indices_flat = torch.nonzero(img_b_mask_flat)\n if len(mask_b_indices_flat) == 0:\n print(\"warning, empty mask b\")\n uv_b_non_matches = get_random_uv_b_non_matches()\n else:\n num_samples = num_matches*num_non_matches_per_match\n rand_numbers_b = torch.rand(num_samples)*len(mask_b_indices_flat)\n rand_indices_b = torch.floor(rand_numbers_b).long()\n randomized_mask_b_indices_flat = torch.index_select(mask_b_indices_flat, 0, rand_indices_b).squeeze(1)\n uv_b_non_matches = (randomized_mask_b_indices_flat%image_width, randomized_mask_b_indices_flat/image_width)\n else:\n uv_b_non_matches = get_random_uv_b_non_matches()\n \n # for each in uv_a, we want non-matches\n # first just randomly sample \"non_matches\"\n # we will later move random samples that were too close to being matches\n uv_b_non_matches = (uv_b_non_matches[0].view(num_matches,num_non_matches_per_match), uv_b_non_matches[1].view(num_matches,num_non_matches_per_match))\n\n # uv_b_matches can now be used to make sure no \"non_matches\" are too close\n # to preserve tensor size, rather than pruning, we can perturb these in pixel space\n copied_uv_b_matches_0 = torch.t(uv_b_matches[0].repeat(num_non_matches_per_match, 1))\n copied_uv_b_matches_1 = torch.t(uv_b_matches[1].repeat(num_non_matches_per_match, 
1))\n\n diffs_0 = copied_uv_b_matches_0 - uv_b_non_matches[0].type(dtype_float)\n diffs_1 = copied_uv_b_matches_1 - uv_b_non_matches[1].type(dtype_float)\n\n diffs_0_flattened = diffs_0.contiguous().view(-1,1)\n diffs_1_flattened = diffs_1.contiguous().view(-1,1)\n\n diffs_0_flattened = torch.abs(diffs_0_flattened).squeeze(1)\n diffs_1_flattened = torch.abs(diffs_1_flattened).squeeze(1)\n\n\n need_to_be_perturbed = torch.zeros_like(diffs_0_flattened)\n ones = torch.zeros_like(diffs_0_flattened)\n num_pixels_too_close = 1.0\n threshold = torch.ones_like(diffs_0_flattened)*num_pixels_too_close\n\n # determine which pixels are too close to being matches\n need_to_be_perturbed = where(diffs_0_flattened < threshold, ones, need_to_be_perturbed)\n need_to_be_perturbed = where(diffs_1_flattened < threshold, ones, need_to_be_perturbed)\n\n minimal_perturb = num_pixels_too_close/2\n minimal_perturb_vector = (torch.rand(len(need_to_be_perturbed))*2).floor()*(minimal_perturb*2)-minimal_perturb\n std_dev = 10\n random_vector = torch.randn(len(need_to_be_perturbed))*std_dev + minimal_perturb_vector\n perturb_vector = need_to_be_perturbed*random_vector\n\n uv_b_non_matches_0_flat = uv_b_non_matches[0].view(-1,1).type(dtype_float).squeeze(1)\n uv_b_non_matches_1_flat = uv_b_non_matches[1].view(-1,1).type(dtype_float).squeeze(1)\n\n uv_b_non_matches_0_flat = uv_b_non_matches_0_flat + perturb_vector\n uv_b_non_matches_1_flat = uv_b_non_matches_1_flat + perturb_vector\n\n # now just need to wrap around any that went out of bounds\n\n # handle wrapping in width\n lower_bound = 0.0\n upper_bound = image_width*1.0 - 1\n lower_bound_vec = torch.ones_like(uv_b_non_matches_0_flat) * lower_bound\n upper_bound_vec = torch.ones_like(uv_b_non_matches_0_flat) * upper_bound\n\n uv_b_non_matches_0_flat = where(uv_b_non_matches_0_flat > upper_bound_vec, \n uv_b_non_matches_0_flat - upper_bound_vec, \n uv_b_non_matches_0_flat)\n\n uv_b_non_matches_0_flat = where(uv_b_non_matches_0_flat < lower_bound_vec, \n uv_b_non_matches_0_flat + upper_bound_vec, \n uv_b_non_matches_0_flat)\n\n # handle wrapping in height\n lower_bound = 0.0\n upper_bound = image_height*1.0 - 1\n lower_bound_vec = torch.ones_like(uv_b_non_matches_1_flat) * lower_bound\n upper_bound_vec = torch.ones_like(uv_b_non_matches_1_flat) * upper_bound\n\n uv_b_non_matches_1_flat = where(uv_b_non_matches_1_flat > upper_bound_vec, \n uv_b_non_matches_1_flat - upper_bound_vec, \n uv_b_non_matches_1_flat)\n\n uv_b_non_matches_1_flat = where(uv_b_non_matches_1_flat < lower_bound_vec, \n uv_b_non_matches_1_flat + upper_bound_vec, \n uv_b_non_matches_1_flat)\n\n return (uv_b_non_matches_0_flat.view(num_matches, num_non_matches_per_match),\n uv_b_non_matches_1_flat.view(num_matches, num_non_matches_per_match))", "def match(threshold, truths, priors, variances, labels, loc_t, conf_t, idx):\n # jaccard index\n overlaps = jaccard(\n truths,\n point_form(priors)\n )\n # (Bipartite Matching)\n # [1,num_objects] best prior for each ground truth\n best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True)\n # [1,num_priors] best ground truth for each prior\n best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True)\n best_truth_idx.squeeze_(0)\n best_truth_overlap.squeeze_(0)\n best_prior_idx.squeeze_(1)\n best_prior_overlap.squeeze_(1)\n best_truth_overlap.index_fill_(0, best_prior_idx, 2) # ensure best prior\n # print(best_truth_overlap)\n # TODO refactor: index best_prior_idx with long tensor\n # ensure every gt matches with its prior of max overlap\n for j 
in range(best_prior_idx.size(0)):\n best_truth_idx[best_prior_idx[j]] = j\n matches = truths[best_truth_idx] # Shape: [num_priors,4]\n conf = labels[best_truth_idx] + 1 # Shape: [num_priors]\n conf[best_truth_overlap < threshold] = 0 # label as background\n loc = encode(matches, priors, variances)\n loc_t[idx] = loc # [num_priors,4] encoded offsets to learn\n conf_t[idx] = conf # [num_priors] top class label for each prior", "def locate_animals(img, target_dict, thres=0.8, scale=1):\n coord_dict = {}\n coord_list = []\n for idx in animals:\n target = target_dict[idx]\n w, h = target.shape[::-1]\n\n res = cv2.matchTemplate(img, target, cv2.TM_CCOEFF_NORMED)\n loc = np.where(res >= thres)\n loc = list(zip(*loc))\n boxes = [[int(ll[1]), int(ll[0]), w, h] for ll in loc]\n\n indices = cv2.dnn.NMSBoxes(boxes, [.8] * len(boxes), 0.5, 0.5)\n \n loc = [loc[i[0]] for i in indices]\n coord_dict[idx] = np.array(loc)\n if len(loc) != 0:\n coord_list.append(coord_dict[idx])\n\n return coord_dict, coord_list", "def face_template_matching(visible_image, thermal_image, bboxlist):\n x1, y1, x2, y2, confidence = bboxlist\n template = np.mean(\n visible_image[int(bboxlist[1]):int(bboxlist[3]), int(bboxlist[0]):int(bboxlist[2]), :], axis=-1)\n image = thermal_image[int(bboxlist[1]):int(bboxlist[3]), :]\n corr_coeff = cv2.matchTemplate(image.astype(np.float32), template.astype(np.float32), cv2.TM_CCOEFF_NORMED)\n corr_coeff = np.squeeze(corr_coeff)\n delta_x = np.argmax(corr_coeff) - x1\n visible_crops = np.array(bboxlist[:4], dtype=\"int\")\n thermal_crops = np.array([x1 + delta_x, y1, x2 + delta_x, y2], dtype=\"int\")\n visible_face = crop_face(visible_image, visible_crops, mode=\"visible\")\n thermal_face = crop_face(thermal_image, thermal_crops, mode=\"thermal\")\n return visible_face, thermal_face", "def get_matches(jig_sol, blocks_roi):\n\n match_data = {}\n height, width,= jig_sol.shape\n font = cv2.FONT_HERSHEY_SIMPLEX\n\n #identify the puzzle peice number based on the peice block position in solution image\n identity = {\n (1, 1): 1,\n (2, 1): 2,\n (3, 1): 3,\n (1, 2): 4,\n (2, 2): 5,\n (3, 2): 6,\n (1, 3): 7,\n (2, 3): 8,\n (3, 3): 9,\n }\n\n #iterate through the blocks roi\n for i in blocks_roi.keys():\n blk = blocks_roi[i].copy()\n blk = cv2.cvtColor(blk,cv2.COLOR_BGR2GRAY)\n max_list = []\n\n #for eack blk rotate is by 90 degrees and try template matching\n for k in range(0,360,90):\n #cv2.resize(blk,(int(width/3),int(height/3)), interpolation= cv2.INTER_CUBIC)\n blk_copy = imutils.rotate_bound(blk, -k)\n\n #get the resulting heat map of template matching\n result = cv2.matchTemplate(jig_sol,blk_copy,cv2.TM_CCOEFF_NORMED)\n\n #get the max value and its location in the heat map\n _, max_val, _, max_loc = cv2.minMaxLoc(result)\n\n #append a tuple consisting of max location, value and peice rotation to max_list\n max_list.append((max_loc, max_val, k))#((k+1)*90)%360))\n\n #find the location with maximum value of template matching regardless of peice rotation\n top_left = max(max_list, key=lambda x: x[1])[0]\n\n #get the peice rotation of that template matching\n rot = max(max_list, key=lambda x: x[1])[2]\n\n #calculate the bottom right cordinates of the block\n bottom_right = (top_left[0] + int(width/3), top_left[1] + int(height/3))\n\n #find the center of the block\n centx = 0\n centy = 0\n for (l,m) in [top_left, bottom_right]:\n centx += l\n centy += m\n centx = int(centx/2)\n centy = int(centy/2)\n\n #get the puzzle peice block position in solution image\n piece = (math.ceil(3*centx/width), 
math.ceil(3*centy/height))\n\n if piece not in identity.keys():\n continue\n\n match_data.update({i: (identity[piece], rot)})\n\n return match_data" ]
[ "0.7127601", "0.7048085", "0.70070684", "0.6867089", "0.67477554", "0.6726216", "0.66068804", "0.6604652", "0.6569852", "0.6569769", "0.654987", "0.64846456", "0.6334263", "0.6333618", "0.6229734", "0.6184669", "0.6158788", "0.60949856", "0.60808086", "0.59501326", "0.59479684", "0.59380364", "0.5935076", "0.59125423", "0.5838101", "0.5816561", "0.581133", "0.58015716", "0.57976043", "0.5795898" ]
0.741164
0
Try to load the part. This method is called by the connect method of this object and by cltremote.RemoteBase.
def load_part(self, partname, remoteclassname):
    success = False
    logger.info(u"{} Loading of part: {}".format(self.uid, partname))
    try:
        module = importlib.import_module(
            "parts.{p}.{p}Remote".format(p=partname))
        logger.info(
            le2mtrans(u"{j} Module parts.{p}.{p}Remote loaded").format(
                j=self.uid, p=partname))
        rem_temp = getattr(module, remoteclassname)
        remote = rem_temp(self)
        self._remotes[partname] = remote
        logger.info(u"{} Part {} loaded successfully".format(
            self.uid, partname))
        success = True
    except (KeyError, ImportError, AttributeError) as e:
        logger.critical(
            u"{} Error while loading part: {}".format(self.uid, e.message))
    finally:
        return success
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load(self, pkgpart, part_dict):\n # call parent to do generic aspects of load\n super(SlideMaster, self)._load(pkgpart, part_dict)\n\n # selectively unmarshal relationships for now\n for rel in self._relationships:\n # log.debug(\"SlideMaster Relationship %s\", rel._reltype)\n if rel._reltype == RT_SLIDELAYOUT:\n self.__slidelayouts._loadpart(rel._target)\n return self", "def _load(self, pkgpart, part_dict):\n # log.debug(\"loading part %s\", pkgpart.partname)\n\n # # set attributes from package part\n self.__content_type = pkgpart.content_type\n self.__partname = pkgpart.partname\n if pkgpart.partname.endswith('.xml'):\n self._element = oxml_fromstring(pkgpart.blob)\n else:\n self._load_blob = pkgpart.blob\n\n # discard any previously loaded relationships\n self._relationships = _RelationshipCollection()\n\n # load relationships and propagate load for related parts\n for pkgrel in pkgpart.relationships:\n # unpack working values for part to be loaded\n reltype = pkgrel.reltype\n target_pkgpart = pkgrel.target\n partname = target_pkgpart.partname\n content_type = target_pkgpart.content_type\n\n # create target part\n if partname in part_dict:\n part = part_dict[partname]\n else:\n part = Part(reltype, content_type)\n part_dict[partname] = part\n part._load(target_pkgpart, part_dict)\n\n # create model-side package relationship\n model_rel = _Relationship(pkgrel.rId, reltype, part)\n self._relationships._additem(model_rel)\n return self", "def __init__(self, content_type=None):\n super(BasePart, self).__init__()\n self.__content_type = content_type\n self.__partname = None\n self._element = None\n self._load_blob = None\n self._relationships = _RelationshipCollection()", "def _load(self, pkgpart, part_dict):\n # call parent to do generic aspects of load\n super(Presentation, self)._load(pkgpart, part_dict)\n\n # side effect of setting reltype ordering is that rId values can be\n # changed (renumbered during resequencing), so must complete rewrites\n # of all four IdLst elements (notesMasterIdLst, etc.) internal to\n # presentation.xml to reflect any possible changes. 
Not sure if good\n # order in the .rels files is worth the trouble just yet, so\n # commenting this out for now.\n\n # # set reltype ordering so rels file ordering is readable\n # self._relationships._reltype_ordering = (RT_SLIDEMASTER,\n # RT_NOTESMASTER, RT_HANDOUTMASTER, RT_SLIDE, RT_PRESPROPS,\n # RT_VIEWPROPS, RT_TABLESTYLES, RT_THEME)\n\n # selectively unmarshal relationships for now\n for rel in self._relationships:\n # log.debug(\"Presentation Relationship %s\", rel._reltype)\n if rel._reltype == RT_SLIDEMASTER:\n self.__slidemasters._loadpart(rel._target)\n elif rel._reltype == RT_SLIDE:\n self.__slides._loadpart(rel._target)\n return self", "def _load(self, pkgpart, part_dict):\n # call parent to do generic aspects of load\n super(BaseSlide, self)._load(pkgpart, part_dict)\n # unmarshal shapes\n self._shapes = ShapeCollection(self._element.cSld.spTree, self)\n # return self-reference to allow generative calling\n return self", "def load_device():", "def _load(self, pkgpart, part_dict):\n # call parent to do generic aspects of load\n super(Slide, self)._load(pkgpart, part_dict)\n # selectively unmarshal relationships for now\n for rel in self._relationships:\n # log.debug(\"SlideMaster Relationship %s\", rel._reltype)\n if rel._reltype == RT_SLIDELAYOUT:\n self.__slidelayout = rel._target\n return self", "def connect(self):\n super(NERDmLoader, self).connect()\n self.lateloadr._client = self._client\n self.lateloadr._db = self._db\n self.relloadr._client = self._client\n self.relloadr._db = self._db", "def _load(self):\n raise NotImplementedError()", "def _load(self, pkgpart, part_dict):\n # call parent to do generic aspects of load\n super(Image, self)._load(pkgpart, part_dict)\n # set file extension\n self.__ext = posixpath.splitext(pkgpart.partname)[1]\n # return self-reference to allow generative calling\n return self", "def load(self):\n self._really_load()", "def load():\n\n global R, P, NP, update, update_available, region_dict\n\n loader = GoSmartParameterLoader(gosmart._prefix)\n loader.initiate()\n\n R = loader.get_regions()\n P, NP = loader.get_parameters()\n\n region_dict = loader.get_region_dict()\n\n update = gosmart.status.StatusUpdater()\n update_available = update.connect()", "def __init__(self):\n self.load()", "def _load(self, pkgpart, part_dict):\n # call parent to do generic aspects of load\n super(SlideLayout, self)._load(pkgpart, part_dict)\n\n # selectively unmarshal relationships we need\n for rel in self._relationships:\n # log.debug(\"SlideLayout Relationship %s\", rel._reltype)\n # get slideMaster from which this slideLayout inherits properties\n if rel._reltype == RT_SLIDEMASTER:\n self.__slidemaster = rel._target\n\n # return self-reference to allow generative calling\n return self", "def load(self):", "def load(self, p):\n return", "def load(self):\n pass", "def load(self):\n pass", "def load(self):\n pass", "def load(self):\n pass", "def _load_disk(self):", "def _load_disk(self):", "def load(self, plugin):\n self.rpc.call(MsfRpcMethod.PluginLoad, [plugin])", "def ImportModelPart(self):\n KratosMultiphysics.Logger.PrintInfo(\"::[MechanicalSolver]::\", \"Importing model part.\")\n problem_path = os.getcwd()\n input_filename = self.settings[\"model_import_settings\"][\"input_filename\"].GetString()\n if self.is_restarted():\n self.get_restart_utility().LoadRestart()\n elif(self.settings[\"model_import_settings\"][\"input_type\"].GetString() == \"mdpa\"):\n # Import model part from mdpa file.\n 
KratosMultiphysics.Logger.PrintInfo(\"::[MechanicalSolver]::\", \"Reading model part from file: \" + os.path.join(problem_path, input_filename) + \".mdpa\")\n KratosMultiphysics.ModelPartIO(input_filename).ReadModelPart(self.main_model_part)\n KratosMultiphysics.Logger.PrintInfo(\"::[MechanicalSolver]::\", \"Finished reading model part from mdpa file.\")\n self.PrepareModelPartForSolver()\n else:\n raise Exception(\"Other model part input options are not yet implemented.\")\n KratosMultiphysics.Logger.PrintInfo(\"ModelPart\", self.main_model_part)\n KratosMultiphysics.Logger.PrintInfo(\"::[MechanicalSolver]:: \", \"Finished importing model part.\")", "def _load_disk(self):\r\n pass", "async def load(self) -> None:\n pass", "def test_get_part(self):\n pass", "def do_load(self, line):\n cmd_args = io.parse_cmd_args(line, io.load_cmd_pattern)\n if cmd_args:\n success = self.manager.load(**cmd_args)\n if success:\n self.console_print(\"Yippee! load successful!\", settings.INFO_FORMAT)\n else:\n self.console_print(\"Sorry, the data could not be loaded from file.\", settings.ERROR_FORMAT)\n else:\n self.console_print(settings.COMMMAND_ARGS_ERROR_MSG, settings.ERROR_FORMAT)", "def set_part(self, connection_part):\n self.part = connection_part", "def postLoad(self):\n pass" ]
[ "0.59609467", "0.5939916", "0.58889776", "0.581184", "0.57881945", "0.5732381", "0.5723686", "0.5715784", "0.567733", "0.5595642", "0.55748475", "0.555035", "0.5547567", "0.5536446", "0.5526269", "0.5427431", "0.54074895", "0.54074895", "0.54074895", "0.54074895", "0.539921", "0.539921", "0.5396471", "0.53935724", "0.5383008", "0.5372027", "0.53513163", "0.5320184", "0.5297453", "0.52842563" ]
0.7296799
0
Load the rlhuboplus model and a scene into openhubo. Returns a servocontroller and a reference robot to show desired movements vs. actual pose. The returned tuple contains the robots, controller, and a name-to-joint-index converter.
def load_rlhuboplus(env, scenename=None, stop=False):
    return _oh.load_scene(env, 'rlhuboplus.robot.xml', scenename, stop)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\r\n\t\t# Publishers\r\n\t\tself._pub_rate = rospy.Publisher('robot/joint_state_publish_rate', UInt16, queue_size=10)\r\n\t\tself.image_pub = rospy.Publisher(\"baxter_view\",Image,queue_size=4)\r\n\t\tself._obj_state = rospy.ServiceProxy(\"/gazebo/set_model_state\",SetModelState)\r\n\t\t\r\n\t\t# Link with baxter interface\r\n\t\tself._left_arm = baxter_interface.limb.Limb(\"left\")\r\n\t\tself._right_arm = baxter_interface.limb.Limb(\"right\")\r\n\t\tself._left_joint_names = self._left_arm.joint_names()\r\n\t\tself.grip_left = baxter_interface.Gripper('left', CHECK_VERSION)\r\n\r\n\t\tprint(\"Getting robot state... \")\r\n\t\tself._rs = baxter_interface.RobotEnable(CHECK_VERSION)\r\n\t\tself._init_state = self._rs.state().enabled\r\n\t\tprint(\"Enabling robot... \")\r\n\t\tself._rs.enable()\r\n\t\t\r\n\t\t# Control parameters\r\n\t\tself._rate = 500.0 # Hz\r\n\t\tself._pub_rate.publish(self._rate)\r\n\t\tself.bridge = CvBridge()\r\n\t\tself._left_arm.set_joint_position_speed(0.3)\r\n\t\tself._object_type = 0\r\n\t\tself.object_position = Point(x=0.0, y=0.0, z=0.0)\r\n\t\tself.object_v = 0.0", "def REP120_compatibility():\n # TODO Add toe frames for ROMEO (not supported by NAOqi yet)\n global robot, NAME, MESH_VERSION, VERSION, LINKS_DICO, OFFSETS_DICO\n print('creating and renaming joints & links to comply to REP120')\n\n # Rename links\n for joint in robot.joints:\n if robot.joints[joint].name.endswith('_joint'):\n robot.joints[joint].name = robot.joints[joint].name[0:-6]\n if robot.joints[joint].name.endswith('_actuator'):\n robot.joints[joint].name = robot.joints[joint].name[0:-9]\n if robot.joints[joint].mimic is not None:\n if robot.joints[joint].mimic.joint_name.endswith('_actuator'):\n robot.joints[joint].mimic.joint_name = \\\n robot.joints[joint].mimic.joint_name[0:-9]\n if robot.joints[joint].mimic.joint_name.endswith('_joint'):\n robot.joints[joint].mimic.joint_name = \\\n robot.joints[joint].mimic.joint_name[0:-6]\n try:\n robot.joints[joint].parent = LINKS_DICO[robot.joints[joint].parent]\n except KeyError:\n pass\n try:\n robot.joints[joint].child = LINKS_DICO[robot.joints[joint].child]\n except KeyError:\n pass\n for link in robot.links.keys():\n try:\n robot.rename_link(link, LINKS_DICO[link])\n except KeyError, ValueError:\n pass\n\n if NAME == 'romeo':\n robot.add_link(ur.Link('gaze'))\n robot.add_joint(ur.Joint('gaze_joint', 'HeadRoll_link',\n 'gaze', 'fixed', None, ur.Pose(\n (OFFSETS_DICO['CameraLeftEyeOffsetX'], 0,\n OFFSETS_DICO['CameraLeftEyeOffsetZ']), (0, 0, 0))))\n MESH_VERSION = ''\n\n elif NAME == 'nao':\n robot.add_link(ur.Link('gaze'))\n robot.add_joint(ur.Joint('gaze_joint', 'Head',\n 'gaze', 'fixed', None, ur.Pose(\n (OFFSETS_DICO['CameraTopV4OffsetX'], 0,\n OFFSETS_DICO['CameraTopV4OffsetZ']), (0, 0, 0))))\n if VERSION == 'V32':\n MESH_VERSION = VERSION\n elif VERSION == 'V33' or VERSION == 'V40' or VERSION == 'V50':\n MESH_VERSION = 'V40'\n\n elif NAME == 'pepper':\n MESH_VERSION = VERSION\n # add base_footprint frame\n robot.add_link(ur.Link('base_footprint'))\n robot.add_joint(ur.Joint('base_footprint_joint', 'Tibia',\n 'base_footprint', 'fixed', None, ur.Pose(\n (OFFSETS_DICO['BaseFootprintOffsetX'],\n OFFSETS_DICO['BaseFootprintOffsetY'],\n OFFSETS_DICO['BaseFootprintOffsetZ']),\n (OFFSETS_DICO['BaseFootprintRotX'],\n OFFSETS_DICO['BaseFootprintRotY'],\n OFFSETS_DICO['BaseFootprintRotZ']))))\n\n # rename the laser frames to sensor frames\n # (they are actually not used for computation)\n laser_links = [c for c in 
robot.links.keys()\n if 'surrounding' in c.lower()]\n for joint in robot.joints.values():\n if joint.child in laser_links:\n laser_frame = joint.child\n laser_device_frame = laser_frame[:-5] + 'device_frame'\n # get the old joint to have the device frame as a child\n joint.child = laser_device_frame\n # but also create a joint with the projected frame as a child\n robot.add_link(ur.Link(laser_device_frame))\n joint_new = copy.deepcopy(joint)\n joint_new.name = joint.name[:-17] + \\\n 'projected_sensor_fixedjoint'\n joint_new.child = laser_frame\n joint_new.origin.rotation[0] = 0\n joint_new.origin.rotation[1] = 0\n # set it on the ground\n joint_new.origin.position[2] = -0.334\n if 'left' in laser_frame.lower():\n # the following line is a temporary fix\n # that should be fixed upstream\n joint_new.origin.rotation[2] = math.pi/2.0 + \\\n 0.1864836732051034\n elif 'right' in laser_frame.lower():\n # the following line is a temporary fix\n # that should be fixed upstream\n joint.origin.position[0] = -0.018\n joint_new.origin.position[0] = -0.018\n # the following line is a temporary fix\n # that should be fixed upstream\n joint_new.origin.rotation[2] = -math.pi/2.0 \\\n - 0.1864836732051034\n elif 'front' in laser_frame.lower():\n joint_new.origin.rotation[2] = 0\n robot.add_joint(joint_new)\n\n # add an optical frame for each robot\n camera_frames = [c for c in robot.links.keys() if 'camera' in c.lower()]\n for camera_frame in camera_frames:\n camera_optical_frame = camera_frame[:-6] + '_optical_frame'\n robot.add_link(ur.Link(camera_optical_frame))\n robot.add_joint(ur.Joint('%s_fixedjoint' % camera_optical_frame,\n camera_frame, camera_optical_frame, 'fixed', None,\n ur.Pose((0, 0, 0), (-math.pi/2.0, 0, -math.pi/2.0))))\n\n # add dummy physics for gazebo simulation\n add_dummy_inertia(['Finger', 'Thumb', 'gripper', 'Fsr'])\n add_dummy_collision(['Fsr'])", "def init_gui_from_robot(gui, robot):\n\n global ftm_list # Forward transition matrices list\n global btm_list # Backward transition matrices list\n global fk_list\n global jac_list\n global com_list\n global com_jac_list\n global polynomial_trajectories\n\n # Robot Information ......................................................\n\n # Paragraph syntax\n p = '<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; ' + \\\n 'margin-right:0px; -qt-block-indent:0; text-indent:0px;\">'\n header = '<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" ' + \\\n '\"http://www.w3.org/TR/REC-html40/strict.dtd\">\\n<html><head' \\\n '><meta' + \\\n 'name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\\np, ' \\\n 'li { ' + \\\n 'white-space: pre-wrap; }\\n</style></head><body style=\" ' \\\n 'font-family' + \\\n ':\\'MS Shell Dlg 2\\'; font-size:10pt; font-weight:400; ' + \\\n 'font-style:normal;\">\\n'\n\n html = header + p + \"<b>Robot Name</b> : \" + f\"{robot.name}</p><br/>\\n\"\n html += p + \"<b>Number of Joints</b> : \" + f\"{robot.njoints()}</p><br/>\\n\"\n html += p + \"<b>Number of Links</b> : \" + f\"{robot.nlinks()}</p><br/>\\n\"\n mass = 0\n for link in robot.links:\n mass += link.mass\n html += p + \"<b>Mass</b> : \" + '%.3f' % mass + \" kg</p>\"\n html += \"</body></html>\"\n\n gui.textEdit_info.setHtml(html)\n\n # Tree Representation ....................................................\n\n gui.treeWidget_info.clear()\n gui.treeWidget_info.setHeaderLabels(['Name', 'Type'])\n\n list_items = []\n list_nodes = []\n\n i = 0\n\n # Iterating over the tree\n for _, _, node in robot.tree:\n\n # Finding parent\n if 
node.is_root:\n parent = gui.treeWidget_info\n else:\n parent = list_items[list_nodes.index(node.parent)]\n\n item = QTreeWidgetItem(parent)\n\n # Text\n type_, nb = node.name.split('_')\n nb = int(nb)\n\n if type_ == 'joint':\n text = robot.joints[nb].name\n text1 = robot.joints[nb].joint_type + \" Joint\"\n else:\n text = robot.links[nb].name\n text1 = \"Link\"\n\n item.setText(0, text)\n item.setText(1, text1.title())\n\n list_items.append(item)\n list_nodes.append(node)\n i += 1\n\n # Expand the whole tree\n gui.treeWidget_info.expandItem(item)\n\n gui.checkBox_fk_x.setChecked(True)\n gui.checkBox_fk_y.setChecked(True)\n gui.checkBox_fk_z.setChecked(True)\n gui.checkBox_fk_orientation.setChecked(True)\n gui.checkBox_jac_x.setChecked(True)\n gui.checkBox_jac_y.setChecked(True)\n gui.checkBox_jac_z.setChecked(True)\n gui.checkBox_jac_wx.setChecked(True)\n gui.checkBox_jac_wy.setChecked(True)\n gui.checkBox_jac_wz.setChecked(True)\n gui.checkBox_com_x.setChecked(True)\n gui.checkBox_com_y.setChecked(True)\n gui.checkBox_com_z.setChecked(True)\n gui.checkBox_com_jac_x.setChecked(True)\n gui.checkBox_com_jac_y.setChecked(True)\n gui.checkBox_com_jac_z.setChecked(True)\n\n # Transition Matrices ....................................................\n\n ftm_list = [] # Forward transition matrices list\n btm_list = [] # Backward transition matrices list\n\n # Adding each matrix to both lists\n gui.listWidget_ftm.clear()\n gui.listWidget_btm.clear()\n gui.comboBox_btm_joint.clear()\n gui.comboBox_ftm_joint.clear()\n i = 0\n for pre, _, node in robot.tree:\n # Text\n type_, nb = node.name.split('_')\n nb = int(nb)\n\n if type_ == 'joint':\n text = robot.joints[nb].name\n gui.listWidget_ftm.addItem(text)\n gui.listWidget_btm.addItem(text)\n\n # Combo boxes\n gui.comboBox_btm_joint.addItem(pre + robot.joints[nb].name)\n gui.comboBox_ftm_joint.addItem(pre + robot.joints[nb].name)\n\n gui.comboBox_ftm_joint.model().item(i).setEnabled(False)\n gui.comboBox_btm_joint.model().item(i).setEnabled(False)\n\n ftm_list.append('joint_' + str(i))\n btm_list.append('joint_' + str(i))\n i += 1\n\n gui.pushButton_ftm_add.setEnabled(False)\n gui.pushButton_btm_add.setEnabled(False)\n\n # Froward Kinematics and Jacobians .......................................\n\n fk_list = []\n jac_list = []\n\n gui.listWidget_fk.clear()\n gui.listWidget_jac.clear()\n gui.comboBox_fk_origin.clear()\n gui.comboBox_jac_origin.clear()\n gui.comboBox_fk_destination.clear()\n gui.comboBox_jac_destination.clear()\n gui.comboBox_loops_origin.clear()\n gui.comboBox_loops_destination.clear()\n\n all_roots = []\n all_leaves = []\n\n # Adding every root --> leaf path to the FK list\n for pre, _, node in robot.tree:\n\n # Text\n type_, nb = node.name.split('_')\n nb = int(nb)\n\n if node.is_root:\n all_roots.append(node.name)\n\n if node.is_leaf:\n all_leaves.append(node.name)\n\n # Combo boxes\n if type_ == 'joint':\n name = robot.joints[nb].name\n else:\n name = robot.links[nb].name\n\n gui.comboBox_fk_origin.addItem(pre + name)\n gui.comboBox_jac_origin.addItem(pre + name)\n gui.comboBox_loops_origin.addItem(pre + name)\n gui.comboBox_loops_origin_2.addItem(pre + name)\n gui.comboBox_fk_destination.addItem(pre + name)\n gui.comboBox_jac_destination.addItem(pre + name)\n gui.comboBox_loops_destination.addItem(pre + name)\n gui.comboBox_loops_destination_2.addItem(pre + name)\n\n for root in all_roots:\n root_type, rnb = root.split('_')\n rnb = int(rnb)\n if root_type == 'joint':\n root_name = robot.joints[rnb].name\n else:\n root_name = 
robot.links[rnb].name\n\n for leaf in all_leaves:\n fk_list.append([root, leaf, 'xyzo'])\n jac_list.append([root, leaf, 'xyzrpY'])\n leaf_type, lnb = leaf.split('_')\n lnb = int(lnb)\n if leaf_type == 'joint':\n leaf_name = robot.joints[lnb].name\n else:\n leaf_name = robot.links[lnb].name\n\n gui.listWidget_fk.addItem(root_name + ' ==> ' + leaf_name +\n ' ' + parse_content('xyzo'))\n gui.listWidget_jac.addItem(root_name + ' ==> ' + leaf_name +\n ' ' + parse_content('xyzrpY'))\n\n # Checking checkboxes\n gui.checkBox_ftm.setChecked(True)\n gui.checkBox_btm.setChecked(False)\n gui.checkBox_fk.setChecked(True)\n gui.checkBox_jac.setChecked(True)\n\n # CoM ....................................................................\n\n gui.listWidget_com.clear()\n gui.listWidget_com_jac.clear()\n satus_ = robot.mass > 0\n if satus_:\n gui.listWidget_com.addItem(\"Center of Mass (x, y, z)\")\n com_list = [\"xyz\"]\n gui.listWidget_com_jac.addItem(\"Center of Mass Jacobian (x, y, z)\")\n com_jac_list = [\"xyz\"]\n else:\n com_list = []\n com_jac_list = []\n gui.checkBox_com.setChecked(satus_)\n gui.checkBox_com_x.setChecked(satus_)\n gui.checkBox_com_y.setChecked(satus_)\n gui.checkBox_com_z.setChecked(satus_)\n gui.checkBox_com_jac.setChecked(satus_)\n gui.checkBox_com_jac_x.setChecked(satus_)\n gui.checkBox_com_jac_y.setChecked(satus_)\n gui.checkBox_com_jac_z.setChecked(satus_)\n\n gui.checkBox_com.setEnabled(satus_)\n gui.listWidget_com.setEnabled(satus_)\n gui.pushButton_add_com.setEnabled(satus_)\n gui.pushButton_del_com.setEnabled(satus_)\n gui.checkBox_com_x.setEnabled(satus_)\n gui.checkBox_com_y.setEnabled(satus_)\n gui.checkBox_com_z.setEnabled(satus_)\n gui.checkBox_com_jac.setEnabled(satus_)\n gui.listWidget_com_jac.setEnabled(satus_)\n gui.pushButton_add_com_jac.setEnabled(satus_)\n gui.pushButton_del_com_jac.setEnabled(satus_)\n gui.checkBox_com_jac_x.setEnabled(satus_)\n gui.checkBox_com_jac_y.setEnabled(satus_)\n gui.checkBox_com_jac_z.setEnabled(satus_)\n\n # Polynomial Trajectories ................................................\n\n polynomial_trajectories = [{\"name\": \"r\",\n \"conditions\": [[\"0\", \"0\", \"0\"],\n [\"0\", \"tf\", \"1\"],\n [\"1\", \"0\", \"0\"],\n [\"1\", \"tf\", \"0\"],\n [\"2\", \"0\", \"0\"],\n [\"2\", \"tf\", \"0\"]\n ]}]\n gui.listWidget_poly.clear()\n gui.listWidget_poly.addItem(\"r\")\n\n gui.pushButton_poly_del.setEnabled(True)\n gui.pushButton_poly_new_traj.setEnabled(True)\n gui.pushButton_poly_del_condition.setEnabled(False)\n gui.pushButton_poly_new_condition.setEnabled(False)\n gui.listWidget_poly.setEnabled(True)\n gui.tableWidget_poly_conditions.setEnabled(False)\n gui.lineEdit_poly_fname.setEnabled(True)\n\n gui.lineEdit_fname.setText(robot.name)\n update_settings(gui)\n\n gui.pushButton_generate.setEnabled(True)\n\n # Loops ..................................................................\n\n gui.comboBox_loops_trajectory.clear()\n gui.comboBox_loops_trajectory.addItem(\"(None)\")\n gui.comboBox_loops_trajectory.addItem(\"r\")\n gui.comboBox_loops_trajectory.setCurrentText(\"r\")\n gui.radioButton_loops_effector.setChecked(True)\n gui.checkBox_loops_x.setChecked(True)\n gui.checkBox_loops_y.setChecked(True)\n gui.checkBox_loops_z.setChecked(True)\n gui.checkBox_loops_wx.setChecked(True)\n gui.checkBox_loops_wy.setChecked(True)\n gui.checkBox_loops_wz.setChecked(True)\n gui.radioButton_loops_none_2.setChecked(True)\n\n gui.radioButton_loops_geometric.setChecked(True)", "def _load_model(self):\n super()._load_model()\n\n # Adjust base pose(s) 
accordingly\n if self.env_configuration == \"bimanual\":\n xpos = self.robots[0].robot_model.base_xpos_offset[\"table\"](self.table_full_size[0])\n self.robots[0].robot_model.set_base_xpos(xpos)\n else:\n if self.env_configuration == \"single-arm-opposed\":\n # Set up robots facing towards each other by rotating them from their default position\n for robot, rotation in zip(self.robots, (np.pi/2, -np.pi/2)):\n xpos = robot.robot_model.base_xpos_offset[\"table\"](self.table_full_size[0])\n rot = np.array((0, 0, rotation))\n xpos = T.euler2mat(rot) @ np.array(xpos)\n robot.robot_model.set_base_xpos(xpos)\n robot.robot_model.set_base_ori(rot)\n else: # \"single-arm-parallel\" configuration setting\n # Set up robots parallel to each other but offset from the center\n for robot, offset in zip(self.robots, (-0.25, 0.25)):\n xpos = robot.robot_model.base_xpos_offset[\"table\"](self.table_full_size[0])\n xpos = np.array(xpos) + np.array((0, offset, 0))\n robot.robot_model.set_base_xpos(xpos)\n\n # load model for table top workspace\n self.mujoco_arena = TableArena(\n table_full_size=self.table_full_size, table_friction=self.table_friction\n )\n if self.use_indicator_object:\n self.mujoco_arena.add_pos_indicator()\n\n # Arena always gets set to zero origin\n self.mujoco_arena.set_origin([0, 0, 0])\n\n # initialize objects of interest\n self.pot = PotWithHandlesObject()\n self.mujoco_objects = OrderedDict([(\"pot\", self.pot)])\n\n # task includes arena, robot, and objects of interest\n self.model = TableTopTask(\n self.mujoco_arena,\n [robot.robot_model for robot in self.robots],\n self.mujoco_objects,\n initializer=self.placement_initializer,\n )\n self.model.place_objects()", "def load_real_robot_state(self):\n self.robotModel.setConfig(motion.robot.getKlamptSensedPosition())", "def import_robot(rigs_dir):\n\n # If the scene is in IK mode, switch to FK before importing the robot\n current_tab = pm.tabLayout('switcher_tab_layout',\n query=True,\n selectTabIndex=True)\n if current_tab == 2:\n pm.tabLayout('switcher_tab_layout', edit=True, selectTabIndex=1)\n\n rigs = general_utils.get_rigs_dict()\n rig_names = general_utils.get_rigs_names(rigs)\n for rig_name in rig_names:\n try:\n if pm.optionMenu('robotImportList',\n query=True,\n value=True) == rig_name:\n try:\n rig_path = rigs[rig_name]\n pm.importFile(rig_path,\n defaultNamespace=True,\n returnNewNodes=True)\n except:\n pm.warning('Error Loading ' + rig_name)\n except:\n pm.warning('No robots found; check rig directory')", "def load_scene():\n module_path = dirname(__file__)\n return _safe_unpickle(join(module_path, 'scene.pickle'))", "def __init__(self):\n self.node_name = \"face_ctrl\"\n rospy.init_node(self.node_name)\n\n rospy.loginfo(\"[FACE] initializing controller\")\n\n self.ros_param_data = self.read_ros_parameters()\n\n # initializing camera\n self.face_cascade_name = self.ros_param_data[\"frontal_face_xml_path\"]\n self.eyes_cascade_name = self.ros_param_data[\"eye_xml_path\"]\n self.face_cascade = cv.CascadeClassifier()\n self.eyes_cascade = cv.CascadeClassifier()\n self.camera_device = 2\n self.cap = cv.VideoCapture(self.camera_device, cv.CAP_V4L)\n if self.cap is None or not self.cap.isOpened():\n rospy.logerr(\"[FACE] Could not connect to the camera!\")\n self.check_camera()\n _, frame = self.cap.read()\n self.image_height, self.image_width = frame.shape[:2]\n\n # initialize publisher\n self.ros_pub_servo_array = rospy.Publisher(\"/cmd_servo_array\", ServoMotorArray, queue_size=1)\n self.bridge = CvBridge()\n self.ros_pub_image 
= rospy.Publisher(\"camera/image\", Image, queue_size=1)\n rospy.loginfo(\"[FACE] initalized publisher\")\n\n # initialize subscriber\n self.servo_ids = []\n self.servo_angles = []\n self.servo_array_msg = ServoMotorArray()\n self.servo_array_msg.servos = []\n for i in range(3):\n self.servo_array_msg.servos.append(ServoMotor_msg())\n\n self.ros_sub_servo_array = rospy.Subscriber(\"/low_level_ctrl/servo_array\", ServoMotorArray, self.store_servo_state)\n rospy.loginfo(\"[FACE] initialized subscriber\")\n\n # initialize the controllers\n param_pid_yaw = self.ros_param_data[\"yaw_controller_gains\"]\n self.pid_controller_yaw = PIDController(param_pid_yaw[\"kp\"], param_pid_yaw[\"ki\"], param_pid_yaw[\"kd\"])\n param_pid_pitch = self.ros_param_data[\"pitch_controller_gains\"]\n self.pid_controller_pitch = PIDController(param_pid_pitch[\"kp\"], param_pid_pitch[\"ki\"], param_pid_pitch[\"kd\"])\n\n rospy.loginfo(\"[FACE] node initialization finished\")", "def load_params(self, event):\n \n self.robot_type = rospy.get_param(\"robot_type\" , 'pendulum' )\n self.robot_config = rospy.get_param(\"robot_config\", 'wrist-only' )\n self.robot_ctl = rospy.get_param(\"controller\", 'RfixCTC' )\n self.fixed_mode = rospy.get_param(\"fixed_mode\", 1 )\n \n \n ###############################################\n # Load robot model for the right configuration\n if self.robot_config == 'wrist-only':\n self.R = Proto.SingleRevoluteDSDM()\n \n elif self.robot_config == 'dual-plane' :\n self.R = Proto.TwoPlanarSerialDSDM()\n \n else:\n self.R = None\n \n ###############################################\n # Load controller\n if self.robot_ctl == 'RfixCTC' :\n self.Ctl = RminCTC.RfixComputedTorqueController( self.R , self.fixed_mode )\n \n elif self.robot_ctl == 'RminCTC' :\n self.Ctl = RminCTC.RminComputedTorqueController( self.R )\n \n elif self.robot_ctl == 'RfixSLD' :\n self.Ctl = RminCTC.RfixSlidingModeController( self.R , self.fixed_mode )\n \n elif self.robot_ctl == 'RminSLD' :\n self.Ctl = RminCTC.RminSlidingModeController( self.R )\n \n elif self.robot_ctl == 'RollCTC' :\n self.Ctl = RollCTC.RolloutComputedTorqueController( self.R )\n \n elif self.robot_ctl == 'RollSLD' :\n self.Ctl = RollCTC.RolloutSlidingModeController( self.R )\n \n else:\n self.Ctl = None\n \n \n if self.robot_config == 'wrist-only':\n self.Ctl.n_gears = rospy.get_param(\"n_gears\", 2 )\n self.x_d = np.array( rospy.get_param(\"goal\", [0,0] ) )\n \n elif self.robot_config == 'dual-plane' :\n self.Ctl.n_gears = rospy.get_param(\"n_gears\", 4 )\n self.x_d = np.array( rospy.get_param(\"goal\", [0.0,0.0,0.0,0.0] ) )\n #self.x_d = np.array( [-3.14 , 0 , 0 , 0] )\n \n # Gen ctl params\n self.Ctl.hysteresis = rospy.get_param(\"hysteresis\", True )\n self.Ctl.min_delay = rospy.get_param(\"min_delay\", 0.5 )\n \n self.Ctl.w0 = rospy.get_param(\"w0\", 1 )\n self.Ctl.zeta = rospy.get_param(\"zeta\", 0.7 )\n \n self.Ctl.lam = rospy.get_param(\"lam\", 1 )\n self.Ctl.nab = rospy.get_param(\"nab\", 1 )\n self.Ctl.D = rospy.get_param(\"D\", 0 )\n \n self.Ctl.horizon = rospy.get_param(\"horizon\", 0.5 )\n self.Ctl.sim_dt = rospy.get_param(\"sim_dt\", 0.1 )\n \n self.Ctl.domain_check = rospy.get_param(\"domain_check\", False )\n \n # Base policy param for roll \n if self.robot_ctl == 'RollCTC' :\n self.Ctl.FixCtl.lam = self.Ctl.lam\n \n elif self.robot_ctl == 'RollSLD' :\n self.Ctl.FixCtl.lam = self.Ctl.lam \n self.Ctl.FixCtl.nab = self.Ctl.nab \n self.Ctl.FixCtl.D = self.Ctl.D", "def get_model_and_assets():\n\n return 
common.read_model('cloth_corner.xml'),common.ASSETS", "def set_robot(self, x, y):\n state = ModelState()\n state.model_name = 'turtlebot3_waffle_pi'\n state.reference_frame = 'world'\n # pose\n state.pose.position.x = x\n state.pose.position.y = y\n state.pose.position.z = 0\n quaternion = tf.transformations.quaternion_from_euler(0, 0, 0)\n state.pose.orientation.x = quaternion[0]\n state.pose.orientation.y = quaternion[1]\n state.pose.orientation.z = quaternion[2]\n state.pose.orientation.w = quaternion[3]\n # twist\n state.twist.linear.x = 0\n state.twist.linear.y = 0\n state.twist.linear.z = 0\n state.twist.angular.x = 0\n state.twist.angular.y = 0\n state.twist.angular.z = 0\n\n rospy.wait_for_service('/gazebo/set_model_state')\n try:\n set_state = self.set_state\n result = set_state(state)\n assert result.success is True\n except rospy.ServiceException:\n print(\"/gazebo/get_model_state service call failed\")", "def get_joint_detection_model(model_path, model_type):\n # config_file_path = '/usr/local/bin/config'\n if model_type == 'Foot_detection':\n # with open('/usr/local/bin/src/config.ini','w') as f:\n # f.write('[anchor_parameters]\\nsizes = 32 64 128 256 512 1024\\nstrides = 8 16 32 64 128 256\\nratios = 1.2 1.5 2 2.5 3\\nscales =1 1.5 2\\n')\n\n model, training_model, prediction_model = create_models(\n backbone_retinanet=backbone('resnet50').retinanet,\n num_classes=5,\n weights=None,\n multi_gpu=False,\n freeze_backbone=True,\n lr=1e-3,\n config=read_config_file('/usr/local/bin/Config files/config_foot.ini'))\n\n training_model.load_weights(model_path)\n infer_model = convert_model(training_model, anchor_params = parse_anchor_parameters(read_config_file('/usr/local/bin/Config files/config_foot.ini')))\n\n elif model_type == 'Hand_detection':\n # with open('/usr/local/bin/src/config.ini','w') as f:\n # f.write('[anchor_parameters]\\nsizes = 32 64 128 256 512 1024\\nstrides = 8 16 32 64 128 256\\nratios = 1 1.5 2 2.5 3\\nscales = 1 1.2 1.6\\n')\n\n model, training_model, prediction_model = create_models(\n backbone_retinanet=backbone('resnet50').retinanet,\n num_classes=6,\n weights=None,\n multi_gpu=False,\n freeze_backbone=True,\n lr=1e-3,\n config=read_config_file('/usr/local/bin/Config files/config_hand.ini'))\n training_model.load_weights(model_path)\n infer_model = convert_model(training_model, anchor_params = parse_anchor_parameters(read_config_file('/usr/local/bin/Config files/config_hand.ini')))\n \n return infer_model", "def __init__(self):\n # Manage command line args\n args = ut_generic.getParserArgsRobot().parse_args()\n self.gzclient = args.gzclient\n self.realSpeed = args.realSpeed\n # self.realSpeed = True\n self.debug = args.debug\n self.multiInstance = args.multiInstance\n self.port = args.port\n # Set the path of the corresponding URDF file\n if self.realSpeed:\n urdf = \"biped.urdf\"\n self.urdfPath = get_prefix_path(\n \"lobot_description\") + \"/share/lobot_description/robots/\" + urdf\n else:\n print(\"Non real speed not yet supported. Use real speed instead. 
\")\n\n # TODO: Include launch logic here, refer to code from the .launch.py files\n # Note that after including the launch logic the code will no longer be debuggable due to multi process stuff\n\n # Create the node after the new ROS_DOMAIN_ID is set in generate_launch_description()\n rclpy.init()\n self.node = rclpy.create_node(self.__class__.__name__)\n\n # class variables\n self._observation_msg = None\n self.max_episode_steps = 1024 # default value, can be updated from baselines\n self.iterator = 0\n self.reset_jnts = True\n self._collision_msg = None\n\n #############################\n # Environment hyperparams\n #############################\n EE_POINTS = np.asmatrix([[0, 0, 0]])\n EE_VELOCITIES = np.asmatrix([[0, 0, 0]])\n\n # # Topics for the robot publisher and subscriber.\n JOINT_PUBLISHER = '/lobot_arm/control'\n # Get Joint names from the parameter server\n get_joints_client = self.node.create_client(GetAllJoints, \"/GetAllControlJoints\",\n qos_profile=qos_profile_services_default)\n req = GetAllJoints.Request()\n req.robot = \"lobot_arm\"\n while not get_joints_client.wait_for_service(timeout_sec=3.0):\n self.node.get_logger().info('service not available, waiting again...')\n\n future = get_joints_client.call_async(req)\n rclpy.spin_until_future_complete(self.node, future)\n if future.result() is not None:\n joint_names = future.result().joints\n self.node.get_logger().info(\n 'Number of joints: %d' %\n (len(joint_names)))\n else:\n self.node.get_logger().info('Service call failed %r' % (future.exception(),))\n JOINT_ORDER = joint_names\n INITIAL_JOINTS = np.full((len(joint_names)), 0.0).tolist()\n reset_condition = {\n 'initial_positions': INITIAL_JOINTS,\n 'initial_velocities': []\n }\n #############################\n\n m_jointOrder = copy.deepcopy(JOINT_ORDER)\n\n # Initialize target end effector position\n self.environment = {\n 'jointOrder': m_jointOrder,\n 'reset_conditions': reset_condition,\n 'tree_path': self.urdfPath,\n 'end_effector_points': EE_POINTS,\n }\n\n # Subscribe to the appropriate topics, taking into account the particular robot\n self._pub = self.node.create_publisher(JointControl, JOINT_PUBLISHER, qos_profile=qos_profile_sensor_data)\n self._sub = self.node.create_subscription(JointState, \"/joint_states\", self.observation_callback,\n qos_profile_sensor_data)\n\n # TODO: Make the clock node run on a separate thread so weird issues like outdated clock can stop happening\n self.lock = threading.Lock()\n self.clock_node = rclpy.create_node(self.__class__.__name__ + \"_clock\")\n self._sub_clock = self.clock_node.create_subscription(RosClock, '/clock', self.clock_callback,\n qos_profile=qos_profile_sensor_data)\n self.exec = rclpy.executors.MultiThreadedExecutor()\n self.exec.add_node(self.clock_node)\n t1 = threading.Thread(target=self.spinClockNode, daemon=True)\n t1.start()\n # self._imu_sub = self.node.create_subscription(JointState, \"/lobot_IMU_controller/out\", self.imu_callback, qos_profile_sensor_data)\n # self._sub = self.node.create_subscription(JointTrajectoryControllerState, JOINT_SUBSCRIBER, self.observation_callback, qos_profile=qos_profile_sensor_data)\n self._reset_sim = self.node.create_client(Empty, '/reset_simulation')\n self._physics_pauser = self.node.create_client(Empty, '/pause_physics')\n self._robot_resetter = self.node.create_client(Empty, '/lobot_arm/reset')\n self._physics_unpauser = self.node.create_client(Empty, '/unpause_physics')\n self.delete_entity = self.node.create_client(DeleteEntity, '/delete_entity')\n 
self.numJoints = len(JOINT_ORDER)\n # Initialize a KDL Jacobian solver from the chain.\n # self.jacSolver = ChainJntToJacSolver(self.mara_chain)\n\n # Observable dimensions, each joint has 2 (joint position + joint velocity), the IMU gives 6\n self.obs_dim = self.numJoints * 2 + 6\n\n # # Here idially we should find the control range of the robot. Unfortunatelly in ROS/KDL there is nothing like this.\n # # I have tested this with the mujoco enviroment and the output is always same low[-1.,-1.], high[1.,1.]\n\n low = -np.pi * np.ones(self.numJoints) * 0.4\n high = np.pi * np.ones(self.numJoints) * 0.4\n\n self.action_space = spaces.Box(low, high)\n\n high = np.inf * np.ones(self.obs_dim)\n low = -high\n self.observation_space = spaces.Box(low, high)\n\n self.seed()\n self.buffer_dist_rewards = []\n self.buffer_tot_rewards = []\n self.collided = 0\n\n # Set the time source\n self._sim_time = 0\n self._sim_time_msg = builtin_interfaces.msg.Time()", "def __init__(self):\n ros_ws_abspath = rospy.get_param(\"/drone/ros_ws_abspath\", None)\n assert ros_ws_abspath is not None, \"You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \\'YOUR/SIM_WS/PATH\\'\"\n assert os.path.exists(ros_ws_abspath), \"The Simulation ROS Workspace path \" + ros_ws_abspath + \\\n \" DOESNT exist, execute: mkdir -p \" + ros_ws_abspath + \\\n \"/src;cd \" + ros_ws_abspath + \";catkin_make\"\n\n ROSLauncher(rospackage_name=\"drone_construct\",\n launch_file_name=\"start_world.launch\",\n ros_ws_abspath=ros_ws_abspath)\n\n # Load Params from the desired Yaml file\n LoadYamlFileParamsTest(rospackage_name=\"openai_ros\",\n rel_path_from_package_to_file=\"src/openai_ros/task_envs/parrotdrone/config\",\n yaml_file_name=\"parrotdrone_goto.yaml\")\n\n # Only variable needed to be set here\n number_actions = rospy.get_param('/drone/n_actions')\n self.action_space = spaces.Discrete(number_actions)\n\n # We set the reward range, which is not compulsory but here we do it.\n self.reward_range = (-numpy.inf, numpy.inf)\n\n # Actions and Observations\n self.linear_forward_speed = rospy.get_param(\n '/drone/linear_forward_speed')\n self.angular_turn_speed = rospy.get_param('/drone/angular_turn_speed')\n self.angular_speed = rospy.get_param('/drone/angular_speed')\n\n self.init_linear_speed_vector = Vector3()\n self.init_linear_speed_vector.x = rospy.get_param(\n '/drone/init_linear_speed_vector/x')\n self.init_linear_speed_vector.y = rospy.get_param(\n '/drone/init_linear_speed_vector/y')\n self.init_linear_speed_vector.z = rospy.get_param(\n '/drone/init_linear_speed_vector/z')\n\n self.init_angular_turn_speed = rospy.get_param(\n '/drone/init_angular_turn_speed')\n\n self.min_sonar_value = rospy.get_param('/drone/min_sonar_value')\n self.max_sonar_value = rospy.get_param('/drone/max_sonar_value')\n\n # Get WorkSpace Cube Dimensions\n self.work_space_x_max = rospy.get_param(\"/drone/work_space/x_max\")\n self.work_space_x_min = rospy.get_param(\"/drone/work_space/x_min\")\n self.work_space_y_max = rospy.get_param(\"/drone/work_space/y_max\")\n self.work_space_y_min = rospy.get_param(\"/drone/work_space/y_min\")\n self.work_space_z_max = rospy.get_param(\"/drone/work_space/z_max\")\n self.work_space_z_min = rospy.get_param(\"/drone/work_space/z_min\")\n\n # Maximum RPY values\n self.max_roll = rospy.get_param(\"/drone/max_roll\")\n self.max_pitch = rospy.get_param(\"/drone/max_pitch\")\n self.max_yaw = rospy.get_param(\"/drone/max_yaw\")\n\n # Get Desired Point to Get\n self.desired_point = 
Point()\n self.desired_point.x = rospy.get_param(\"/drone/desired_pose/x\")\n self.desired_point.y = rospy.get_param(\"/drone/desired_pose/y\")\n self.desired_point.z = rospy.get_param(\"/drone/desired_pose/z\")\n\n self.desired_point_epsilon = rospy.get_param(\n \"/drone/desired_point_epsilon\")\n\n # We place the Maximum and minimum values of the X,Y,Z,R,P,Yof the pose\n\n high = numpy.array([self.work_space_x_max,\n self.work_space_y_max,\n self.work_space_z_max,\n self.max_roll,\n self.max_pitch,\n self.max_yaw,\n self.max_sonar_value])\n\n low = numpy.array([self.work_space_x_min,\n self.work_space_y_min,\n self.work_space_z_min,\n -1*self.max_roll,\n -1*self.max_pitch,\n -numpy.inf,\n self.min_sonar_value])\n\n self.observation_space = spaces.Box(low, high)\n\n rospy.logdebug(\"ACTION SPACES TYPE===>\"+str(self.action_space))\n rospy.logdebug(\"OBSERVATION SPACES TYPE===>\" +\n str(self.observation_space))\n\n # Rewards\n self.closer_to_point_reward = rospy.get_param(\n \"/drone/closer_to_point_reward\")\n self.not_ending_point_reward = rospy.get_param(\n \"/drone/not_ending_point_reward\")\n self.end_episode_points = rospy.get_param(\"/drone/end_episode_points\")\n\n self.cumulated_steps = 0.0\n\n # Here we will add any init functions prior to starting the MyRobotEnv\n super(ParrotDroneGotoEnv, self).__init__(ros_ws_abspath)", "def load_model(self, sess, pb_model_path):\n\n logging.info(\"Import yolo model from pb start .......\")\n\n with sess.as_default():\n with sess.graph.as_default():\n with tf.gfile.FastGFile(pb_model_path, 'rb') as f_handle:\n logging.info(\"ParseFromString start .......\")\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f_handle.read())\n logging.info(\"ParseFromString end .......\")\n\n tf.import_graph_def(graph_def, name='')\n logging.info(\"Import_graph_def end .......\")\n\n logging.info(\"Import yolo model from pb end .......\")", "def load_tokenizer_and_model(model=\"microsoft/DialoGPT-large\"):\n # Initialize tokenizer and model\n print(\"Loading model...\")\n tokenizer = AutoTokenizer.from_pretrained(model)\n model = AutoModelForCausalLM.from_pretrained(model)\n \n # Return tokenizer and model\n return tokenizer, model", "def __init__(self, dtype, robot_path, joint_coord, joint_states):\n self.robot = RobotModel(robot_path)\n self.limits = self.robot.phys_limits()\n self.labels = self.limits.index\n if dtype == 'synth':\n self._files = self._parse_synth(joint_coord)\n self._synth_input(joint_coord, self._files)\n self._synth_output(joint_states, self._files)\n else:\n input_ = np.load(joint_coord)\n self.input_ = F.normalize(torch.tensor(input_.reshape((input_.shape[0], -1)), dtype=torch.float32))\n self.output = F.normalize(torch.tensor(self._parse_json(joint_states), dtype=torch.float32))", "def _load_model(self):\n self._load_scaler('scaler.save')\n self._load_encoder('encoder0.save', 0)\n self._load_encoder('encoder1.save', 1)\n self._load_neural_network('model.json', 'model.h5')\n return", "def main():\n\n #robot = S2Serial( \"/dev/ttyUSB0\" )\n robot = S2Fluke2( \"/dev/rfcomm2\" )\n\n print( \"getMotorStats : \", robot.getMotorStats() )\n print( \"getEncoders : \", robot.getEncoders( 1 ) )\n print( \"getStall : \", robot.getStall() )\n print( \"setMotors 100, -100 : \", robot.setMotors( 100, -100) )\n time.sleep( 3.0 )\n print( \"setMotors -100, 100 : \", robot.setMotors( -100, 100) )\n time.sleep( 3.0 )\n print( \"setMotorsOff : \", robot.setMotorsOff() )\n\n robot.close()", "def load_onnx(model_name):\n onnx_path = '%s.onnx' % 
model_name\n if not os.path.isfile(onnx_path):\n print('ERROR: file (%s) not found! You might want to run yolo_to_onnx.py first to generate it.' % onnx_path)\n return None\n else:\n with open(onnx_path, 'rb') as f:\n return f.read()", "def load_model(\n model_path=filepath + \"/trained_models/hi2en/\", model_file_name=\"model.h5\"\n):\n model_path = (\n filepath + \"/trained_models/{}/\".format(model_path)\n if model_path in [\"en2hi\", \"hi2en\"]\n else model_path\n )\n config = SConfig(configuration_file=model_path + \"config.pkl\")\n s2s = Seq2Seq(config)\n s2s.load_model(path_to_model=model_path, model_file_name=model_file_name)\n return s2s", "def robot_get_obs(sim):\n if sim.data.qpos is not None and sim.model.joint_names:\n names = [n for n in sim.model.joint_names if n.startswith('robot')]\n return (\n np.array([sim.data.get_joint_qpos(name) for name in names]),\n np.array([sim.data.get_joint_qvel(name) for name in names]),\n )\n return np.zeros(0), np.zeros(0)", "def readOdom(msg):\n global pose\n global xPosition\n global yPosition\n global theta\n global odom_list\n global odom_tf\n try:\n pose = msg.pose\n geo_quat = pose.pose.orientation\n q = [geo_quat.x, geo_quat.y, geo_quat.z, geo_quat.w]\n odom_tf.sendTransform((pose.pose.position.x, pose.pose.position.y, 0), \n (pose.pose.orientation.x, pose.pose.orientation.y,pose.pose.orientation.z,pose.pose.orientation.w),rospy.Time.now(),\"base_footprint\",\"odom\")\n #Convert transform to global usable coordinates (x, y, theta)\n (trans, rot) = odom_list.lookupTransform('map', 'base_footprint', rospy.Time(0))\n roll, pitch, yaw = euler_from_quaternion(rot)\n theta = yaw * (180.0/math.pi)\n xPosition = trans[0]\n yPosition = trans[1]\n except:\n print \"waiting\"", "def __init__(self):\n # Variables that we give through the constructor.\n # None in this case\n\n # Internal Vars\n # TODO[done] add controler Hint: $ rosservice call /jetbot_0/controller_manager/list_controllers\n self.controllers_list = ['jetbot_joint_state_controller',\n 'jetbot_velocity_controller'\n ]\n # TODO[done] add namespace Hint: $ rostopic list | grep controller\n self.robot_name_space = \"jetbot_0\"\n\n # We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv\n super(JetbotRobotEnv, self).__init__(controllers_list=self.controllers_list,\n robot_name_space=self.robot_name_space,\n reset_controls=True)\n\n\n\n \"\"\"\n To check any topic we need to have the simulations running, we need to do two things:\n 1) Unpause the simulation: without that th stream of data doesnt flow. This is for simulations\n that are pause for whatever the reason\n 2) If the simulation was running already for some reason, we need to reset the controlers.\n This has to do with the fact that some plugins with tf, dont understand the reset of the simulation\n and need to be reseted to work properly.\n \"\"\"\n self.gazebo.unpauseSim()\n self.controllers_object.reset_controllers()\n self._check_all_sensors_ready()\n\n # We Start all the ROS related Subscribers and publishers\n # TODO[done] add subscriber publisher\n rospy.Subscriber(\"/jetbot_0/joint_states\", JointState, self._joints_callback)\n rospy.Subscriber(\"/jetbot_0/jetbot_velocity_controller/odom\", Odometry, self._odom_callback)\n\n self._vel_pub = rospy.Publisher('/jetbot_0/jetbot_velocity_controller/cmd_vel',\n Twist, queue_size=6) # ??? 
queue size\n\n self._check_publishers_connection()\n \n self.gazebo.pauseSim()", "def __init__(self, robot_pose, robot_info):\n K3Supervisor.__init__(self, robot_pose, robot_info)\n\n # The maximal distance to an obstacle (inexact)\n self.distmax = robot_info.ir_sensors.rmax + robot_info.wheels.base_length/2\n\n # Fill in some parameters\n self.parameters.sensor_poses = robot_info.ir_sensors.poses[:]\n self.parameters.ir_max = robot_info.ir_sensors.rmax\n self.parameters.direction = 'left'\n self.parameters.distance = self.distmax*0.85\n \n self.process_state_info(robot_info)\n \n #Add controllers\n self.gtg = self.create_controller('GoToGoal', self.parameters)\n self.avoidobstacles = self.create_controller('AvoidObstacles', self.parameters)\n self.wall = self.create_controller('FollowWall', self.parameters)\n self.hold = self.create_controller('Hold', None)\n \n # Define transitions\n self.add_controller(self.hold,\n (lambda: not self.at_goal(), self.gtg))\n self.add_controller(self.gtg,\n (self.at_goal, self.hold),\n (self.at_wall, self.wall))\n self.add_controller(self.wall,\n (self.at_goal,self.hold),\n (self.unsafe, self.avoidobstacles),\n (self.wall_cleared, self.gtg))\n self.add_controller(self.avoidobstacles,\n (self.at_goal, self.hold),\n (self.safe, self.wall))\n\n # Start in the 'go-to-goal' state\n self.current = self.gtg", "def esm1v_t33_650M_UR90S_2():\n return load_model_and_alphabet_hub(\"esm1v_t33_650M_UR90S_2\")", "def robotInit(self):\n\n #Initialize Networktables\n self.sd = NetworkTables.getTable('SmartDashboard')\n\n \n #Set up motors to drive robot\n self.M2 = wpilib.VictorSP(2)\n self.M3 = wpilib.VictorSP(3)\n #self.M2.setInverted(True)\n #self.M3.setInverted(True)\n self.left = wpilib.SpeedControllerGroup(self.M2,self.M3)\n \n self.M0 = wpilib.VictorSP(0)\n self.M1 = wpilib.VictorSP(1)\n self.right = wpilib.SpeedControllerGroup(self.M0,self.M1)\n self.drive = wpilib.drive.DifferentialDrive(self.left, self.right)\n \n \n self.stick = wpilib.Joystick(1)\n self.timer = wpilib.Timer()\n #Camera\n wpilib.CameraServer.launch()\n #Servo\n self.SV1 = wpilib.Servo(9)\n self.SV2 = wpilib.Servo(8) \n #Dashboard\n NetworkTables.initialize(server='10.61.62.2')\n #Switches\n self.SW0 = wpilib.DigitalInput(0)\n self.SW1 = wpilib.DigitalInput(1)\n #Elevator\n self.E = wpilib.VictorSP(5)\n self.prepareCubeFlag = 0\n self.grabCubeFlag = 0\n self.deliverCubeFlag = 0\n self.adjustLeftFlag=0\n self.adjustRightFlag=0\n self.driveFlag=0\n #Gyro\n self.gyro = wpilib.ADXRS450_Gyro(0)\n self.gyro.reset()\n #All possible autonomous routines in a sendable chooser\n '''\n self.chooser = wpilib.SendableChooser()\n self.chooser.addDefault(\"None\", '4')\n self.chooser.addObject(\"left-LeftScale\", '1')\n self.chooser.addObject(\"Middle-LeftScale\", '2')\n self.chooser.addObject(\"Right-LeftScale\", '3')\n self.chooser.addObject(\"Left-RightScale\", '5')\n '''\n #wpilib.SmartDashboard.putData('Choice', self.chooser)\n #Encoders\n self.EC1 = wpilib.Encoder(2,3)\n self.EC2 = wpilib.Encoder(4,5)\n self.EC1.reset()\n self.EC2.reset()", "def LoadModel(self):\n\t\tself.form = loader.loadModel(\"models/lampExport\")\n\t\t#self.form.setScale(.007)\n\t\tself.form.reparentTo(render)\n\t\tself.form.setPos(self.xpos, self.ypos, -30)", "def _import_elmo():\n\n elmo = hub.Module('https://storage.googleapis.com/az-nlp/elmo_ru-news_wmt11-16_1.5M_steps.tar.gz',\n trainable=False) # news\n # elmo = hub.Module('https://storage.googleapis.com/az-nlp/elmo_ru-twitter_2013-01_2018-04_600k_steps.tar.gz',\n # 
trainable=False) # twitter\n print('❤️ ❤️ ❤️ DONE (re)importing Tensorflow hub.Module ')\n print('Tensorflow version is', tf.__version__)\n\n return elmo", "def esm1v_t33_650M_UR90S_1():\n return load_model_and_alphabet_hub(\"esm1v_t33_650M_UR90S_1\")" ]
[ "0.53342927", "0.5223863", "0.51131403", "0.50385404", "0.5028461", "0.495816", "0.49522844", "0.4951688", "0.48854965", "0.48458624", "0.4845302", "0.4821964", "0.47963774", "0.47869748", "0.47868013", "0.47747764", "0.4767754", "0.4742408", "0.473562", "0.47332805", "0.4684475", "0.4677446", "0.4668728", "0.46664882", "0.46582288", "0.46559972", "0.46476445", "0.4647231", "0.46463645", "0.4639018" ]
0.62102795
0
A closure to easily convert from a string joint name to the robot's actual DOF index.
def makeNameToIndexConverter(robot,autotranslate=True): return _oh.make_name_to_index_converter(robot,autotranslate)
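For illustration, a minimal sketch of the kind of closure the document above returns (assumes an openravepy-style robot exposing GetJoint(name) and Joint.GetDOFIndex(); the body below is a guess at the helper's behavior, not the actual _oh.make_name_to_index_converter implementation):

# Sketch only, not the real _oh helper:
def make_name_to_index_converter(robot):
    def name_to_index(name):
        joint = robot.GetJoint(name)   # returns None if the robot lacks this joint
        return joint.GetDOFIndex() if joint is not None else -1
    return name_to_index

# Usage ('LSP' is a hypothetical joint name):
# to_index = make_name_to_index_converter(robot)
# dof = to_index('LSP')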
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_index(identify):\n\n pedaco = identify.replace('/', '')\n return int(pedaco)", "def index2word(index_word, index_dict):\n if index_word == -1 or index_word not in index_dict.keys():\n return '_eps_'\n else:\n return index_dict[index_word]", "def make_dof_value_map(robot):\n names = [j.GetName() for j in robot.GetJoints()]\n indices = [j.GetDOFIndex() for j in robot.GetJoints()]\n\n def get_dofs():\n pose={}\n values=robot.GetDOFValues()\n for (i,n) in zip(indices,names):\n pose.setdefault(n,values[i])\n return pose\n\n return get_dofs", "def getOqiNameIndx( self, name ):\n \n if not self.oqiNames:\n self.getOqiNames( )\n\n if name in self.oqiNames:\n return self.oqiNames[ name ]\n elif name in self.oqiNames.values():\n return name\n else:\n return -1", "def get_joint_number(self) -> int:\n return self.DoF", "def _find_index(string):\n if string[0] == 'X':\n return 0\n elif string == 'D':\n return 1\n else:\n return np.where(sym == string)[0][0]", "def get_door_index(self, door_name: str) -> int:\n if door_name == 'edgewood':\n return self.left_door\n elif door_name == 'encinal':\n return self.right_door\n elif door_name == 'both':\n return 0\n else:\n return int(door_name) - 1", "def getOfcNameIndx( self, name ):\n \n if not self.ofcNames:\n self.getOfcNames( )\n\n if name in self.ofcNames:\n return self.ofcNames[ name ]\n elif name in self.ofcNames.values():\n return name\n else:\n return -1", "def getIndex(condition='', component=''):\n if component == 'IC2' or component == 'IC14':\n index = '.nii[0]'\n elif component == 'IC7' or component == 'IC29':\n index = '.nii[1]'\n elif component == 'IC25':\n index = '.nii[2]'\n elif component == 'IC31':\n index = '.nii[3]'\n elif component == 'IC39':\n index = '.nii[4]'\n else:\n index = '.nii'\n\n return index", "def ionNameToIndex(self, name):\n for i in range(len(self.ions)):\n if self.ions[i].getName() == name:\n return i\n\n raise KeyError(\"No ion named '{}' found in the output.\".format(name))", "def index_to_vehicle_type(index):\n if index == 0:\n return \"wheel\"\n if index == 1:\n return \"tank\"\n if index == 2:\n return \"hover\"", "def algebraic_to_index(move: str) -> tuple[int, int]:\r\n return (RANK_TO_INDEX[move[1]], FILE_TO_INDEX[move[0]])", "def joint_adr(self, joint_name):\n jntadr = mjlib.mj_name2id(self.ptr, C.mjOBJ_JOINT, joint_name)\n assert (jntadr >= 0)\n dofmap = {C.mjJNT_FREE: 7,\n C.mjJNT_BALL: 4,\n C.mjJNT_SLIDE: 1,\n C.mjJNT_HINGE: 1}\n qposadr = self.jnt_qposadr[jntadr][0]\n qveladr = self.jnt_dofadr[jntadr][0]\n dof = dofmap[self.jnt_type[jntadr][0]]\n return (qposadr, qveladr, dof)", "def get_indexname(self,index):\n if index in self.indexname2index:\n return index\n else:\n for name,i in self.indexname2index.items():\n if self.index(index)==i:\n return name\n return None", "def get_nome_from_index(self,serie,index):\r\n nome = serie[index]\r\n return nome", "def getOthNameIndx( self, name ):\n \n if not self.othNames:\n self.getOthNames( )\n\n if name in self.othNames:\n return self.othNames[ name ]\n elif name in self.othNames.values():\n return name\n else:\n return -1", "def obs_to_index(obs, env_low, env_dx):\n a = math.floor((obs[0] - env_low[0])/env_dx[0])\n b = math.floor((obs[1] - env_low[1])/env_dx[1])\n return a, b", "def getOmIndex(self, name):\n for i in range(len(self.oameni)):\n if self.oameni[i].name == name:\n return i\n return None", "def index_id(i):\n return f\"(i={i})\"", "def indexByName(seq, name):\n return indexMatching(seq, lambda x: x.name == name)", "def getName(self, index) 
-> Str:\n ...", "def joint_callback(data):\n joints[0] = data.position[9]\n joints[1] = data.position[10]\n joints[2] = data.position[11]\n joints[3] = data.position[12]\n joints[4] = data.position[13]\n global position_geted\n position_geted = True", "def get_jpos(self, joint_name=None):\n raise NotImplementedError", "def _index_and_mapping(self, namespace):\n index, doc_type = namespace.split('.', 1)\n return index.lower(), doc_type", "def fetch_target(self, name):\n target_name = []\n \n for x in [\"FFJ0\", \"FFJ3\", \"FFJ4\",\n \"MFJ0\", \"MFJ3\", \"MFJ4\",\n \"RFJ0\", \"RFJ3\", \"RFJ4\",\n \"LFJ0\", \"LFJ3\", \"LFJ4\", \"LFJ5\",\n \"THJ1\", \"THJ2\", \"THJ3\", \"THJ4\", \"THJ5\",\n \"WRJ1\", \"WRJ2\" ]:\n target_name.append( joint(joint_name = x, \n joint_target = rospy.get_param('/targets/'+name+'/'+x)) )\n return target_name", "def get_idx_to_target(self, idx):\n metadata = self.data.loc[idx]\n target = metadata['label']\n return target", "def get_lipid_from_index(ind: int) -> str:\r\n r_s = [I_PMPI, I_PI4P, I_PIP2, I_DAG, I_PMPA, I_ERPA, I_CDPDAG, I_ERPI]\r\n r_n = [L_PMPI, L_PI4P, L_PIP2, L_DAG, L_PMPA, L_ERPA, L_CDPDAG, L_ERPI]\r\n return r_n[r_s.index(ind)]", "def getColumnIndex(boardName, bltName):\n boardDict = columnIndexDict[boardName]\n columnIndex = boardDict[bltName]\n\n return columnIndex", "def _name2idx(name):\n match = re.search(r\"eth(\\d+)\", name, re.I)\n if not match:\n raise exception.CloudbaseInitException(\n \"invalid NetworkDetails name {!r}\"\n .format(name)\n )\n return int(match.group(1))", "def get_jvel(self, joint_name=None):\n raise NotImplementedError" ]
[ "0.5482621", "0.5456526", "0.5411834", "0.5256262", "0.52252036", "0.5196534", "0.51575613", "0.5150166", "0.5116707", "0.50905156", "0.5075703", "0.50337106", "0.4960798", "0.49572524", "0.4953177", "0.4950627", "0.49415132", "0.4921745", "0.49088845", "0.49038202", "0.49000984", "0.48957053", "0.48925653", "0.4883528", "0.48766533", "0.48671472", "0.4865814", "0.48333353", "0.4824312", "0.48238954" ]
0.55940926
0
Load up and configure the simpleFloor environment for hacking with physics. Sets some useful defaults.
def load_simplefloor(env): return _oh.load_scene(env,None,'simpleFloor.env.xml',True)
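For context, a hedged sketch of the openravepy calls a loader like this typically wraps (the scene file name comes from the document above; the 'ode' engine choice and gravity value are illustrative assumptions, since the row does not show which defaults _oh.load_scene actually sets):

# Sketch only; the real defaults applied by _oh.load_scene are not shown here.
from openravepy import Environment, RaveCreatePhysicsEngine

env = Environment()
env.Load('simpleFloor.env.xml')                 # scene file named in the row above
physics = RaveCreatePhysicsEngine(env, 'ode')   # assumed physics engine choice
physics.SetGravity([0, 0, -9.8])                # assumed default gravity
env.SetPhysicsEngine(physics)
robot = env.GetRobots()[0]                      # assumes the scene defines one robot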
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def InitEnvironment(self):\r\n\t\t\r\n\t\t# Turn antialiasing on\r\n\t\trender.setAntialias(AntialiasAttrib.MMultisample,1)\r\n\t\t\r\n\t\t# load the falcon model\r\n\t\tfalcon = loader.loadModel(\"Content/falcon/falcon.bam\")\r\n\t\tfalcon.setScale(30)\r\n\t\tfalcon.setPos(0, 0, 28.5)\r\n\t\tfalcon.reparentTo(render)", "def setup(self):\n build_world.start_level(self)", "def create_scene():\n create_floor()\n if config.M != \"\":\n if config.LEVEL == 1:\n create_wall()\n create_enemy()\n create_gap()\n create_platform()\n create_marijuana()\n create_star()\n create_fish()\n elif config.LEVEL == 2:\n create_boss()\n create_platform()\n create_star()", "def changeFlooring():\r\n\tif tileFloor.getVisible():\r\n\t\ttileFloor.visible(viz.OFF)\r\n\t\thardwoodFloor.visible(viz.ON)\r\n\telse:\r\n\t\ttileFloor.visible(viz.ON)\r\n\t\thardwoodFloor.visible(viz.OFF)", "def __init__(self):\r\n config = ConfigProvider().getProcessingConfig()\r\n self.xGround = config.get(\"xGround\")\r\n self.yGround = config.get(\"yGround\")", "def load_floorc():\n s = {}\n try:\n fd = open(os.path.expanduser('~/.floorc'), 'rb')\n except IOError as e:\n if e.errno == 2:\n return s\n raise\n\n default_settings = fd.read().split('\\n')\n fd.close()\n\n for setting in default_settings:\n # TODO: this is horrible\n if len(setting) == 0 or setting[0] == '#':\n continue\n try:\n name, value = setting.split(' ', 1)\n except IndexError:\n continue\n s[name.upper()] = value\n return s", "def setup_scene(self):\n\n # read map\n options, landscapes, statics, dynamics, trees, hero, hare = read_map('test.map')\n self.num_of_blocks_X, self.num_of_blocks_Y = options['size']\n with self.canvas:\n # init landscapes\n block_x = 0\n for i in xrange(self.num_of_blocks_X):\n block_y = 0\n for j in xrange(self.num_of_blocks_Y):\n class_name = landscapes[i][j]\n if class_name is not None:\n clazz = eval(class_name.capitalize())\n else:\n clazz = Grass\n block = clazz(pos=(block_x, block_y),\n size=(self.block_width, self.block_height), border=(0, 0))\n self.blocks[i][j] = block\n block_y += self.block_height \n block_x += self.block_width\n\n # init dynamics\n for x, y, class_name in dynamics:\n if 'dynamics_as_blocks' in options and options['dynamics_as_blocks']:\n x, y = (x + 0.5) * self.block_width, (y + 0.5) * self.block_height\n eval(class_name.capitalize())(x, y)\n \n with self.canvas:\n # draw or hero\n HeroRabbit(BLOCK_SIZE[0]*(hero[0] + 0.5), BLOCK_SIZE[1]*(hero[1] + 0.5))\n Hare(BLOCK_SIZE[0]*(hare[0] + 0.5), BLOCK_SIZE[1]*(hare[1] + 0.5))\n\n # init statics\n def _is_mountain(i, j):\n return int(0 <= i < self.num_of_blocks_X and 0 <= j <= self.num_of_blocks_Y and\n statics[i][j] == 'mountain')\n\n def _get_mountain_type(i, j):\n opensides = (_is_mountain(i - 1, j), _is_mountain(i, j + 1),\n _is_mountain(i + 1, j), _is_mountain(i, j - 1)) # left, top, right, bottom\n opensides_to_type = {\n (1, 1, 1, 1): 'center',\n (1, 0, 1, 0): 'horizontal_center',\n (0, 1, 0, 1): 'vertical_center',\n (1, 0, 0, 0): 'horizontal_right',\n (0, 1, 0, 0): 'vertical_bottom',\n (0, 0, 1, 0): 'horizontal_left',\n (0, 0, 0, 1): 'vertical_top',\n }\n return opensides_to_type.get(opensides, 'horizontal_center')\n \n _mountains = []\n _bushes= []\n \n for i in xrange(self.num_of_blocks_X):\n for j in xrange(self.num_of_blocks_Y):\n class_name = statics[i][j]\n if class_name is not None:\n pos = (i + 0.5) * self.block_width, (j + 0.5) * self.block_height\n if class_name == 'bush':\n #Bush(*pos)\n _bushes.append(pos)\n elif class_name == 'mountain':\n 
_mountains.append((pos, _get_mountain_type(i, j)))\n #Mountain(*pos, type=_get_mountain_type(i, j))\n \n for tree_pos in trees:\n Tree(BLOCK_SIZE[0]*(tree_pos[0] + 0.5), BLOCK_SIZE[1]*(tree_pos[1] + 0.5))\n \n with self.canvas:\n for pos in _bushes:\n Bush(*pos)\n \n for pos, type in _mountains:\n Mountain(*pos, type=type)\n\n HolyCarrot(13.5*self.block_width, 7.5*self.block_height)\n # This should be called at the end\n self.reindex_graphics()", "def setup_product():\n\n fiveconfigure.debug_mode = True\n import collective.geo.openlayers\n zcml.load_config('configuretest.zcml', collective.geo.openlayers)\n\n fiveconfigure.debug_mode = False", "def setupRender():\n prefs = getPreferences()\n\n # Check of the built-in environment maps path can be located.\n # Discontinue if it cannot be found.\n envPath = prefs.path_value\n if not envPath:\n return {'WARNING'}, \"No environment images path defined\"\n\n # Discontinue if there is no output path defined.\n renderPath = outputPath()\n if not renderPath:\n return {'WARNING'}, \"The scene needs to be saved before rendering\"\n\n if prefs.image_value == 'NONE':\n return {'WARNING'}, \"No environment image defined\"\n\n setRenderSettings(os.path.join(renderPath, IMAGE_NAME))\n createCamera()\n createWorld(envPath)\n return renderPath", "def init_vars(self):\n\n load_dotenv()\n self.smart_cube = True if os.environ.get(\"SMART_CUBE\") == \"True\" else False\n self.gen_parsed_to_cubedb = True if os.environ.get(\"GEN_PARSED_TO_CUBEDB\") == \"True\" else False\n self.name_of_solve = os.environ.get(\"NAME_OF_SOLVE\")\n self.time_solve = os.environ.get(\"TIME_SOLVE\")\n self.comms_unparsed_bool = True if os.environ.get(\"COMMS_UNPARSED\") == \"True\" else False\n self.gen_with_move_count = True if os.environ.get(\"GEN_WITH_MOVE_COUNT\") == \"True\" else False\n self.diff_to_solved_state = float(os.environ.get(\"DIFF_BETWEEN_ALGS\"))\n self.parse_to_lp = True if os.environ.get(\"PARSE_TO_LETTER_PAIR\") == \"True\" else False\n self.gen_with_moves = True if os.environ.get(\"GEN_WITH_MOVE_COUNT\") == \"True\" else False\n self.buffer_ed = self.get_buffer_ed(os.environ.get(\"EDGES_BUFFER\"))\n self.buffer_cor = self.get_buffer_cor(os.environ.get(\"CORNER_BUFFER\"))\n self.path_to_lp = os.environ.get(\"PATH_LETTER_PAIR_FILE\")\n self.dict_lp = self.load_letter_pairs_dict()", "def configure_location(self):\n # Set floor correctly\n self.floor.set(\"pos\", array_to_string(self.bottom_pos))", "def make_environment(self):\n\t\tbase_layer = 0\n\t\tself.Gravity = 9.81\n\n\t\t#Private data for to define model\n\t\t__model_max_altitude = 87000\n\t\t__atmosphere_layers = {0:0, 11000:1, 20000:2, 32000:3, 47000:4, 51000:5, 71000:6}\n\t\t__layer_base_data = {\n\t\t\t0:{'temp':288.15, 'lapse':-0.0065, 'press':101325},\n\t\t\t1:{'temp':216.65, 'lapse':0, 'press':22632.1},\n\t\t\t2:{'temp':216.65, 'lapse':0.001, 'press':5474.89},\n\t\t\t3:{'temp':228.65, 'lapse':0.0028, 'press':868.019},\n\t\t\t4:{'temp':270.65, 'lapse':0, 'press':110.906},\n\t\t\t5:{'temp':270.65, 'lapse':-0.0028, 'press':66.9389},\n\t\t\t6:{'temp':214.65, 'lapse':-0.002, 'press':3.95642},\n\t\t\t}\n\t\t__gas_constant = 8.31432#e3\n\t\t__air_molar_mass = 0.0289644\n\t\t__specific_heat_ratio = 1.4\n\t\t__visc_lambda = 1.51204129e-6\n\t\t__visc_sutherland_const = 120.0\n\n\t\tif self.Altitude > __model_max_altitude:\n\t\t\traise helpers.extra_exceptions.ModelExtrapolationException(\n\t\t\t'Exceeded model maximum altitude')\n\n\t\tlayerKeys = __atmosphere_layers.keys()\n\t\tlayerKeys = 
list(layerKeys)\n\t\tlayerKeys.sort()\n\t\tfor layer in layerKeys:\n\t\t\tif self.Altitude >= layer:\n\t\t\t\tbase_layer = __atmosphere_layers[layer]\n\t\t\t\tbase_alt = layer\n\t\tbase_temp = __layer_base_data[base_layer]['temp']\n\t\tbase_lapse = __layer_base_data[base_layer]['lapse']\n\t\tbase_press = __layer_base_data[base_layer]['press']\n\n\t\tself.Temperature = base_temp + base_lapse * (self.Altitude - base_alt)\n\t\t+ self.Temperature_offset\n\n\t\tif base_lapse == 0:\n\t\t\tself.Pressure = base_press * \\\n\t\t\t\tnp.exp( (-self.Gravity*__air_molar_mass*(self.Altitude-base_alt)) \\\n\t\t\t\t/(__gas_constant*base_temp))\n\t\telse:\n\t\t\tself.Pressure = base_press * \\\n\t\t\t\t(base_temp/self.Temperature) ** \\\n\t\t\t\t(self.Gravity*__air_molar_mass/__gas_constant/base_lapse)\n\n\t\tself.Density = __air_molar_mass*self.Pressure / \\\n\t\t\t__gas_constant/self.Temperature\n\t\tself.Speed_of_sound = np.sqrt(__specific_heat_ratio*__gas_constant* \\\n\t\t\tself.Temperature/__air_molar_mass)\n\t\tself.Dynamic_viscosity = __visc_lambda*self.Temperature**(3.0/2.0)/ \\\n\t\t\t(self.Temperature+__visc_sutherland_const)", "def init():\n global tube, ball, faceTextureName, woodTextureName\n tube = gluNewQuadric()\n gluQuadricDrawStyle(tube, GLU_FILL)\n ball = gluNewQuadric()\n gluQuadricDrawStyle(ball, GLU_FILL)\n\n # Set up lighting and depth-test\n glEnable(GL_LIGHTING)\n glEnable(GL_NORMALIZE) # Inefficient...\n glEnable(GL_DEPTH_TEST) # For z-buffering!\n\n generateCheckerBoardTexture()\n faceTextureName = loadImageTexture(\"brick.jpg\")\n woodTextureName = loadImageTexture(\"wood.jpg\")", "def __init__(self):\n # Screen settings\n self.screen_width = 1860\n self.screen_height = 1020\n self.screen_size = self.screen_width, self.screen_height\n self.bg_color = 230, 230, 230\n\n # Ship static settings\n self.ship_limit = 3\n\n # Bullet static settings\n self.bullet_limit = 3\n self.bullet_width = 3\n self.bullet_height = 15\n self.bullet_color = 60, 60, 60\n\n # Alien static settings\n self.fleet_drop_speed = 10\n\n self.speed_up_scale = 1.1\n self.initialize_dynamic_settings()", "def setup(self):\n # Initialize the drawing environment (create main windows, etc)\n glutInit(sys.argv)\n glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB | GLUT_DEPTH)\n glutInitWindowSize(WINDOW_WIDTH, WINDOW_HEIGHT)\n glutCreateWindow(name)\n\n glShadeModel(GL_SMOOTH)\n\n glClearDepth(1.0)\n glDepthFunc(GL_LESS) # The Type Of Depth Test To Do\n glEnable(GL_DEPTH_TEST) # Enables Depth Testing\n glShadeModel(GL_SMOOTH) # Enables Smooth Color Shading\n\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity() # Reset The Projection Matrix\n\n # Calculate The Aspect Ratio Of The Window\n gluPerspective(45.0, float(WINDOW_WIDTH)/float(WINDOW_HEIGHT), 0.1, 100.0)\n\n glMatrixMode(GL_MODELVIEW)\n\n # Set up keyboard listeners.\n glutKeyboardFunc(self.on_key)", "def appInit(self):\n glutInitDisplayMode( GLUT_SINGLE | GLUT_RGB | GLUT_DEPTH )\n glClearColor(0.0, 0.0, 0.0, 0.0)\n glClearDepth(1.0 )\n glEnable( GL_DEPTH_TEST )\n glShadeModel( GL_SMOOTH )\n glEnable( GL_NORMALIZE )\n glEnable( GL_COLOR_MATERIAL )\n\n glEnable( GL_LIGHTING )\n glEnable( GL_LIGHT0 )\n\n self.set_lighting()\n\n self.make_simple_scenes()\n self.make_multi_object_scene()", "def initialize(self):\n self.log.info(\"Initialize raspPinball hardware.\")\n\n self.config = self.machine.config['rasppinball']\n self.machine.config_validator.validate_config(\"rasppinball\", self.config)\n print(\"***************************\")\n print(self.config)\n #self.machine_type 
= (\n # self.machine.config['hardware']['driverboards'].lower())\n\n self._connect_to_hardware()\n\n\n # keypad\n self._kp = Keypad()\n self.old_key = \"\"\n self.key = \"\"\n # leds\n self.init_strips()", "def default_config():\n return {'grid': {'regular': {'width': 0.05,\n 'wake': {'width': 0.1, 'progression': None},\n 'layers': 50,\n 'thickness': 5,\n 'boundary_layer': { 'initial_thickness': 4.2e-5 }}}}", "def __init__(self, room_type=None, surface=None, wall_covering=None, floor_covering=None, general_state=None):\n\n self._room_type = None\n self._surface = None\n self._wall_covering = None\n self._floor_covering = None\n self._general_state = None\n\n if room_type is not None:\n self.room_type = room_type\n if surface is not None:\n self.surface = surface\n if wall_covering is not None:\n self.wall_covering = wall_covering\n if floor_covering is not None:\n self.floor_covering = floor_covering\n if general_state is not None:\n self.general_state = general_state", "def appInit(self):\n glMatrixMode( GL_PROJECTION )\n glLoadIdentity()\n glMatrixMode( GL_MODELVIEW )\n glLoadIdentity()\n\n glClearColor(0.0, 0.0, 0.0, 0.0)\n glClearDepth(1.0)\n glEnable( GL_DEPTH_TEST )\n glShadeModel( GL_SMOOTH )\n glEnable( GL_NORMALIZE )\n glEnable( GL_COLOR_MATERIAL )\n\n self.set_lighting()\n glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB | GLUT_DEPTH)\n self.make_simple_scenes()\n self.make_multi_object_scene()", "def setupWidget(self):\r\n self.generateCoordinates()\r\n self.modifyCoordinateLists()\r\n self.settings.movementMatrix = self.movementMatrix\r\n self.settings.ghostIntersectionList = self.ghostIntersectionList\r\n self.createBodies()\r\n print(\"GameW set\")", "def do_load_environment(self, *arg):\n print(\"Loading sensors\")\n self.environment = ArduinoSerialMonitor(auto_detect=False)\n self.do_enable_sensor('environment', delay=1)", "def initialize_dynamic_settings(self):\n self.ship_speed_factor = 1.5\n self.bullet_speed_factor = 3\n self.alien_speed_factor = 1\n self.fleet_direction = 1\n #Puntos\n self.alien_points = 50", "def __init__(self,\n launchSiteLat,\n launchSiteLon,\n launchSiteElev,\n dateAndTime,\n soundingFile,\n timeFromSounding,\n distanceFromSounding,\n inflationTemperature=0.0,\n UTC_offset=0.,\n debugging=False,\n load_on_init=False):\n # Initialize sounding-specific variables\n self.distanceFromSounding = distanceFromSounding\n self.timeFromSounding = timeFromSounding\n self.maxAltitude = 50999\n self.soundingFile = soundingFile\n\n self._interpolationPrecision = 200\n\n # Run the environment class initialization first\n super(soundingEnvironment, self).__init__(\n inflationTemperature=inflationTemperature,\n launchSiteLat=launchSiteLat,\n launchSiteLon=launchSiteLon,\n launchSiteElev=launchSiteElev,\n dateAndTime=dateAndTime,\n UTC_offset=UTC_offset,\n debugging=debugging,\n load_on_init=load_on_init)", "def set_env_config(self):\n self.env_config = {\n # ===== STANDARD ARGUMENTS ======\n \"n_agents\": 4, # Number of non-planner agents\n \"world_size\": [15, 15], # [Height, Width] of the env world\n \"episode_length\": 1000, # Number of time-steps per episode\n # In multi-action-mode, the policy selects an action for each action\n # subspace (defined in component code)\n # Otherwise, the policy selects only 1 action\n \"multi_action_mode_agents\": False,\n \"multi_action_mode_planner\": True,\n # When flattening observations, concatenate scalar & vector observations\n # before output\n # Otherwise, return observations with minimal processing\n 
\"flatten_observations\": False,\n # When Flattening masks, concatenate each action subspace mask\n # into a single array\n # Note: flatten_masks = True is recommended for masking action logits\n \"flatten_masks\": True,\n # ===== COMPONENTS =====\n # Which components to use\n \"components\": [\n # (1) Building houses\n {\"Build\": {}},\n # (2) Trading collectible resources\n {\"ContinuousDoubleAuction\": {\"max_num_orders\": 5}},\n # (3) Movement and resource collection\n {\"Gather\": {}},\n ],\n # ===== SCENARIO =====\n # Which scenario class to use\n \"scenario_name\": \"uniform/simple_wood_and_stone\",\n # (optional) kwargs of the chosen scenario class\n \"starting_agent_coin\": 10,\n \"starting_stone_coverage\": 0.10,\n \"starting_wood_coverage\": 0.10,\n }\n\n # Create an environment instance from the config\n self.env = foundation.make_env_instance(**self.env_config)", "def initialize_dynamic_settings(self):\n self.ship_speed = 5\n self.bullet_speed = 1.0\n self.alien_speed=1.0\n #fleet direction of 1 represents right -1 represents left\n self.fleet_direction = 1\n #scoring\n self.alien_points=50", "async def test_default_setup_params(opp):\n state = opp.states.get(ENTITY_WATER_HEATER)\n assert state.attributes.get(\"min_temp\") == 110\n assert state.attributes.get(\"max_temp\") == 140", "def draw_environment():\n rect(screen, LIGHT_GRAY, (0, 0, 800, 450)) # grey sky\n rect(screen, WHITE, (0, 450, 800, 1000)) # white ground", "def configure(self):\n if self.three_layer:\n config = self.config\n # remove the continental shelf\n config.set('soma', 'phi', '1e-16')\n config.set('soma', 'shelf_depth', '0.0')", "def setup(args):\n # chaparral,denseForest,lake,canyon,burning,burnt = neighbours\n config_path = args[0]\n config = utils.load(config_path)\n # -- THE CA MUST BE RELOADED IN THE GUI IF ANY OF THE BELOW ARE CHANGED --\n config.title = \"Forest Fire\"\n config.dimensions = 2\n config.states = \\\n (\n CHAPARRAL,\n DENSE_FORREST,\n LAKE,\n CANYON,\n BURNING,\n BURNT,\n START_BURN,\n END_BURN\n )\n\n # ------------ -------------------------------------------------------------\n\n config.state_colors = \\\n [\n (0.6,0.6,0), #chaparral\n (0,0.4,0), #dense forrest\n (0,0.5,1), #lake\n (0.5,0.5,0.5), #canyon\n (1,0,0), #burning\n (0.25,0.25,0.25), #burnt\n (1,0.7,0), #starting to burn\n (0.8,0,0.2) #ending burn\n ]\n\n config.grid_dims = (grid_size, grid_size)\n config.num_generations = 1000\n config.set_initial_grid(initial_grid)\n config.wrap = False\n\n # --------------------------------------------------------------------\n\n # the GUI calls this to pass the user defined config\n # into the main system with an extra argument\n # do not change\n if len(args) == 2:\n config.save()\n sys.exit()\n return config" ]
[ "0.6248853", "0.60405487", "0.57158166", "0.56440204", "0.558289", "0.55495113", "0.55436087", "0.5526429", "0.55164933", "0.5507159", "0.54870534", "0.5484633", "0.5480351", "0.5469515", "0.5452306", "0.5410114", "0.5379484", "0.53766954", "0.5355727", "0.5351993", "0.5343648", "0.5319706", "0.531408", "0.52978647", "0.5285846", "0.52725977", "0.52567655", "0.52548695", "0.5252536", "0.52495366" ]
0.6798784
0
Set tweaked finger torque for grasping experiment. Deprecated due to new torque-based servo control.
def set_finger_torque(robot,maxT,fingers):\n #Super kludgy...\n for f in fingers:\n if robot.GetJoint(f):\n robot.GetJoint(f).SetTorqueLimits([maxT])\n robot.GetJoint(f).SetVelocityLimits([3])\n robot.GetJoint(f).SetAccelerationLimits([30])
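For reference, a short hedged usage sketch of the function above (the finger joint names are hypothetical placeholders; real names depend on the loaded robot model):

# Sketch: clamp torque/velocity/acceleration limits on a few finger joints.
fingers = ['rightIndexKnuckle1', 'rightIndexKnuckle2', 'rightThumbKnuckle1']
set_finger_torque(robot, 0.5, fingers)   # maxT=0.5 N*m is an arbitrary example
# Joint names not present in the model are skipped by the GetJoint(f) check above.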
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setMotorTorque(self, torque):\r\n if torque < 0.0:\r\n torque = 0.0\r\n elif torque > 1.0:\r\n torque = 1.0\r\n torque *= self.maxTorque\r\n if self.reverse:\r\n torque *= -1\r\n dTorque = 2\r\n if self.torque < torque:\r\n self.torque += dTorque\r\n elif self.torque > torque:\r\n self.torque -= dTorque\r\n \r\n for tire in self.tires:\r\n if tire.torque:\r\n tire.shape.setMotorTorque( self.torque )", "def set_tunings(self, kp, ki, kd, p_on_e):\n if kp < 0 or ki < 0 or kd < 0:\n print(\"PID parameters cannot be less than zero.\")\n if self.forward:\n self.kp = kp\n self.ki = ki\n self.kd = kd\n else:\n self.kp = -kp\n self.ki = -ki\n self.kd = -kd\n self.p_on_e = p_on_e", "def sent_torque(self,torque):\n if self.mode == 4: # Profiled Torque\n # enable operation\n self.node.sdo[0x6040].bits[0] = 1\n self.node.sdo[0x6040].bits[1] = 1\n self.node.sdo[0x6040].bits[2] = 1\n self.node.sdo[0x6040].bits[3] = 1\n #self.node.sdo[0x6040].bits[4] = 1\n #self.node.sdo[0x6040].bits[7] = 0\n self.node.sdo.download(0x6071, 0x0,self._decTohex(torque)) # torque", "def SetTweak(self, Weight = True, Reset = True):\n self.D = self.DTweak\n self.TargetAcc = self.AccTweak\n if Weight: self.W = self.WTweak\n if Reset: self.Reset()", "def set_param_motor():\n servo.setSpeed(0, 0) # max = 255\n servo.setAccel(0, 0)\n servo.setSpeed(1, 150) # max = 255\n servo.setAccel(1, 150)", "def set_target(self, target, useAvoidance=False, verbose=False):\n self.logger.info(\"Deprecated function set_target called. Please call head_direction.\")\n self.head_direction(self, target, useAvoidance, verbose)", "def torque_enable(self, value):\n self._write(MX_TORQUE_ENABLE, value)", "def teach_trick(self, trick_name):\r\n # TODO\r\n pass", "def setTilt(self,tilt = None):\n if tilt != None:\n self.tilt = tilt # Update tilt if given\n # Make surface normals to front and back faces allowing for the tilt\n fn = Unit3d(Angle(2*math.pi - self.angle/2 - self.tilt))\n bn = Unit3d(Angle(self.angle/2 - self.tilt))\n\n # Poistion of front and back surfaces allowing for the tilt\n p = Vector3d(0.0,0.0,self.height*math.tan(self.angle/2)/2)\n p.rotateAboutX(-self.tilt)\n\n # Update the locations and surface normals of the two faces.\n self[0].point = -p\n self[1].point = p\n self[0].normal = fn\n self[1].normal = bn\n return self", "def servo_set_target(ch, pulse):\n\n # Pulse number is 4x pulse width (in microseconds)\n p_num = 4 * int(pulse)\n\n # Send command to servo controller\n servo_send_cmd(cmd_set_target, ch, p_num)", "def change_tilt_thread(slider, cam):\n\n # disable the widget\n slider.disabled = True\n\n # move the tilt\n cam.set_elevation_angle(-slider.value)\n\n # wait before moving tilt again\n time.sleep(TILT_WAITING)\n\n # re-enable the widget\n slider.disabled = False", "def set_actuator(self, action):\n deltav = action[0]\n vt = np.clip(self.vt + deltav, -self.maxV, self.maxV)\n self.vt = vt\n p.setJointMotorControl2(bodyUniqueId=self.botId,\n jointIndex=0,\n controlMode=p.VELOCITY_CONTROL,\n targetVelocity=vt)\n p.setJointMotorControl2(bodyUniqueId=self.botId,\n jointIndex=1,\n controlMode=p.VELOCITY_CONTROL,\n targetVelocity=-vt)", "def set_gripper(self, new_gripper_state):\n self.gripper_servo_handler.set_angle(7, new_gripper_state)\n self.gripper_servo_handler.move_to_angles()", "def motor_torques(self):\n raise NotImplementedError('Not yet implemented!')", "def set_hybrid_control(self, model, max_force_torque, timeout=5.0, stop_on_target_force=False):\n\n reduced_speed = np.deg2rad([100, 100, 100, 150, 150, 150])\n 
q_last = self.joint_angles()\n\n # Timeout for motion\n initime = rospy.get_time()\n xb = self.end_effector()\n failure_counter = 0\n\n while not rospy.is_shutdown() \\\n and (rospy.get_time() - initime) < timeout:\n\n # Transform wrench to the base_link frame\n Wb = self.get_ee_wrench()\n\n # Current Force in task-space\n Fb = -1 * Wb\n # Safety limits: max force\n if np.any(np.abs(Fb) > max_force_torque):\n rospy.logerr('Maximum force/torque exceeded {}'.format(np.round(Wb, 3)))\n self.set_target_pose_flex(pose=xb, t=model.dt)\n return FORCE_TORQUE_EXCEEDED\n\n if stop_on_target_force and np.any(np.abs(Fb)[model.target_force != 0] > model.target_force[model.target_force != 0]):\n rospy.loginfo('Target F/T reached {}'.format(np.round(Wb, 3)) + ' Stopping!')\n self.set_target_pose_flex(pose=xb, t=model.dt)\n return STOP_ON_TARGET_FORCE\n\n # Current position in task-space\n xb = self.end_effector()\n\n dxf = model.control_position_orientation(Fb, xb) # angular velocity\n\n # Limit linear/angular velocity\n dxf[:3] = np.clip(dxf[:3], -0.5, 0.5)\n dxf[3:] = np.clip(dxf[3:], -5., 5.)\n\n xc = transformations.pose_from_angular_velocity(xb, dxf, dt=model.dt)\n\n # Avoid extra acceleration when a point failed due to IK or other violation\n # So, this corrects the allowed time for the next point\n dt = model.dt * (failure_counter+1)\n\n q = self._solve_ik(xc)\n if q is None:\n rospy.logwarn(\"IK not found\")\n result = IK_NOT_FOUND\n else:\n q_speed = (q_last - q)/dt\n if np.any(np.abs(q_speed) > reduced_speed):\n rospy.logwarn(\"Exceeded reduced max speed %s deg/s, Ignoring command\" % np.round(np.rad2deg(q_speed), 0))\n result = SPEED_LIMIT_EXCEEDED\n else:\n result = self.set_joint_positions_flex(position=q, t=dt)\n\n if result != DONE:\n failure_counter += 1\n continue # Don't wait since there is not motion\n else:\n failure_counter = 0\n\n # Compensate the time allocated to the next command when there are failures\n for _ in range(failure_counter+1):\n self.rate.sleep()\n\n q_last = self.joint_angles()\n return DONE", "def set_trick_mode(self, trick_mode):\n if trick_mode not in ['previous', 'next']:\n raise ValueError('Trick mode must one of: previous, next')\n\n self.get(COMMAND_UIC, 'SetTrickMode', [('trickmode', trick_mode)])", "def changeRingSetting(self):\n #Input code to accommodate function of Ring setting", "def setReferencePoseSlider(self, part, *args):\n\n percent = float(args[0]) * .01\n self.setPosePercentage(percent, part)", "def get_motor_load_torque(self):\n # Start with the brake normal\n # change to 17deg (tan 17?)\n # change to torque using the pitch of the thread on the ball screw\n # (^ make sure to take friction into account)\n # That should give us the torque acting on the motor. If this torque is greater than the motor max torque, it will slip\n # Take into account that the max holding torque is different from the max torque. How do we know if the motor is holding or moving? \n # How do we control the stepper motor? Where are the routines for that? 
\n pass", "def setTCLimits(*args):\n args[0].Limit.TCLimit.tc_limit = args[1]", "def settemp(t=-10):\n print camera.SetTemperature(t)\n camera.status.update()", "def servo_force(self, *args, **kwargs) -> Any:\n pass", "def torus(*args, axis: Union[List[float, float, float], bool]=None, caching: bool=True, degree:\n Union[int, bool]=3, endSweep: Union[float, bool]=2, heightRatio: Union[float,\n bool]=2.0, minorSweep: Union[float, bool]=6.2831853, nodeState: Union[int, bool]=0,\n pivot: Union[List[float, float, float], bool]=None, radius: Union[float, bool]=1.0,\n sections: Union[int, bool]=8, spans: Union[int, bool]=1, startSweep: Union[float,\n bool]=0, tolerance: Union[float, bool]=0.01, useTolerance: bool=False,\n constructionHistory: bool=True, name: AnyStr=\"\", object: bool=True, polygon: int=0,\n q=True, query=True, e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass", "def pain(self, int):\n self.vel[1] = int", "def setBrake(self, brake):\r\n if brake < 0.0:\r\n brake = 0.0\r\n elif brake > 1.0:\r\n brake = 1.0\r\n brake *= self.maxBrake\r\n for tire in self.tires:\r\n if tire.brake:\r\n tire.shape.setBrakeTorque( brake )", "def make_torque(self):\n def torque_func(m):\n heff = self.field(m)\n total_torque = torque.landau_lifshitz(m, heff, self.damping)\n if self.stt != 0:\n total_torque += torque.slonczewski(m, self.Jc, self.stt)\n return total_torque\n self.torque = torque_func", "def aTorque(self):\n pass", "def customize_torpedo_speed(self, current_gameboard, turn, new_speed):\n current_gameboard['torpedo_speed'][turn] = new_speed", "def set_hand_vel(self,vel):\n # Calculate joint velocities to achieve desired velocity\n joint_vels=np.dot(self._kin.jacobian_pseudo_inverse(),vel)\n joints=dict(zip(self._arm.joint_names(),(joint_vels)))\n\n self._arm.set_joint_velocities(joints)", "def set_tilt(self, value):\n self._homekit_target_tilt = value\n _LOGGER.info(\"%s: Set tilt to %d\", self.entity_id, value)\n\n # HomeKit sends values between -90 and 90.\n # We'll have to normalize to [0,100]\n value = round((value + 90) / 180.0 * 100.0)\n\n params = {ATTR_ENTITY_ID: self.entity_id, ATTR_TILT_POSITION: value}\n\n self.call_service(DOMAIN, SERVICE_SET_COVER_TILT_POSITION, params, value)" ]
[ "0.630241", "0.5443808", "0.54325783", "0.5350345", "0.5200339", "0.5137968", "0.5102211", "0.5079462", "0.50652754", "0.50623274", "0.5060927", "0.5049635", "0.5045565", "0.50452226", "0.5034421", "0.50263256", "0.50193846", "0.5009361", "0.49941224", "0.49921134", "0.49826652", "0.49802", "0.49763128", "0.49735805", "0.49546704", "0.49529678", "0.4943214", "0.49263668", "0.48997015", "0.4897729" ]
0.70379955
0
Returns recipe does not exist message
def _does_not_exist(): response_payload = dict( message="Recipe does not exist!" ) response_payload = jsonify(response_payload) return make_response(response_payload, 404)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def non_existing_recipe_error_test(self):\n client = TestClient()\n error = client.run(\"upload Pkg/0.1@user/channel\", ignore_error=True)\n self.assertTrue(error)\n self.assertIn(\"ERROR: There is no local conanfile exported as Pkg/0.1@user/channel\",\n client.user_io.out)", "def _make_not_found_message(index: Union[int, slice, str]) -> str:\n msg = [f\"Analysis result {index} not found.\"]\n errors = self.errors()\n if errors:\n msg.append(f\"Errors: {errors}\")\n return \"\\n\".join(msg)", "def pkg_not_found_mess(pkgname: str, reponame: str) -> None:\n meta = MainData()\n print(('{0}Package {1}{2} {0}not found in \\'{3}\\' '\n 'repository.{4}').format(meta.clrs['red'],\n meta.clrs['lcyan'],\n pkgname,\n reponame,\n meta.clrs['reset']))", "def _item_not_found(item):\n if _is_element_present(PROMPT_BOX[\"Heading\"]):\n if \"not on file\" in _get_text(PROMPT_BOX[\"Heading\"]):\n return click_message_box_key(\"OK\", verify=False)\n return False", "async def not_found(self, msg, command):\n await msg.channel.send(**{\n 'content': f'I do not understand `{command}`',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })", "def get_no_items_message(self):\n return _('There is no students registered for %(what)s.') % {\n 'what': self.request.cradmin_role.get_path()\n }", "def not_found(self, request):\n return 'Not found'", "def test_exists_false(self):\n self.assertFalse(Sample.exists('Not_a_Sample', self.sample_template))", "def test_exists_false(self):\n self.assertFalse(PrepSample.exists('Not_a_Sample', self.prep_template))", "def missing_entry_error(entry, name, yml):\n\n yml = symlink_target(yml)\n output_1 = path(yml) + '\\n'\n output_2 = colored(' - Error: Missing ', 'red')\n output_3 = colored(str(entry), attrs=['bold'])\n output_4 = colored(' in ', 'red')\n output_5 = colored(str(name), attrs=['bold'])\n return output_1 + output_2 + output_3 + output_4 + output_5", "def test_search_by_bad_ingredients(self):\n recipe_id = self.request_mgr.search_by_ingredients(['asdfadsfa'])\n self.assertEqual(recipe_id, None)", "def notfound(error):\n\n categories = [ \"business\",\n \"entertainment\",\n \"general\",\n \"health\",\n \"science\",\n \"sports\",\n \"technology\"\n ]\n return render_template(\"notfound.html\", categories = categories), 404", "def test_exists_false(self):\n self.assertFalse(SampleTemplate.exists(self.new_study.id))", "def test_run_request_item__not_found(self):\n basics = {\n 'API_URL_ROOT': self.api_url_root,\n 'API_KEY': self.api_key,\n 'PARTNERSHIP_ID': self.partnership_id,\n 'UNIVERSITY_CODE': self.university_code,\n 'PICKUP_LOCATION': self.pickup_location,\n 'LOG_PATH': self.LOG_PATH }\n bd = BorrowDirect( basics )\n bd.run_request_item( self.patron_barcode, 'ISBN', self.isbn_not_found )\n self.assertEqual(\n {'Problem': {'ErrorCode': 'PUBRI003', 'ErrorMessage': 'No result'}}, bd.request_result )", "def test_get_unexisting_book(self):\n\n response1 = self.client.get(\n '/api/v1/books/NJCF4057', content_type='application/json', headers=self.get_admin_token())\n result = json.loads(response1.data.decode())\n self.assertEqual(result['message'],\n 'Book Not Found')\n assert response1.status_code == 404", "def not_found(error):\n pass", "def getReason():", "def not_exist(request, obj_type):\r\n\tcheck_user(request)\r\n\r\n\tcontext = {'obj_type': obj_type}\r\n\r\n\treturn render(request, 'note/does_not_exist.html', context)", "def test_02_not_exist(self):\n with self.assertLogs(\"borg\", \"WARNING\") as logger:\n 
self.api.extract(self.archive, self.file_3)\n message = logger.records[0].getMessage()\n self.assertRegex(\n message,\n r\".*?file_3.*never\",\n \"Warning not logged for bad path\",\n )", "def test_exists(self):\n with self.assertRaises(IncompetentQiitaDeveloperError):\n BaseSample.exists('SKM7.640188', SampleTemplate(1))", "def _item_exists(self, location):\n \"Does nothing\"", "def handle_notfound(self, message):\n cmd = self._popMatchingCmd(message)\n if cmd is not None:\n cmd.success(None)", "def not_found(e):\n return render_template(\"errors/404.html\"), 404", "def test_descriptor_with_item_not_found(self):\r\n\r\n self._get_descriptor_with_invalid_link(ItemNotFoundError)", "def option_not_exist_msg(option_name, existing_options):\n result = [\"option '%s' doesn't exist\" % option_name,\n \"Possible options are %s\" % existing_options or \"none\"]\n return \"\\n\".join(result)", "def test_request_item_not_found(self):\n r = Requester( self.logger )\n ( search_key, search_value ) = ( 'ISBN', self.isbn_not_found )\n result_dct = r.request_item(\n self.patron_barcode, search_key, search_value, self.pickup_location, self.api_url_root, self.api_key, self.partnership_id, self.university_code )\n self.assertEqual(\n {'Problem': {'ErrorCode': 'PUBRI003', 'ErrorMessage': 'No result'}}, result_dct )", "def fail_new_beer(name):\n return 'Doublon, la biere : %s' %name + ' existe deja'", "def test_not_found(self):\n self.library.get.when.called_with('dummy!!!')\\\n .should.throw(ViolationDoesNotExists)", "def test_resource_not_existing(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"connections.non_existing_connection.name\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"Resource connections/non_existing_connection does not exist.\"\n )", "def test_resource_exists(self):\r\n\t\tself.assertTrue(self._configuration_.resources().has_key(\"AddWordTaskRepeat\") and self._configuration_.resources().has_key(\"RemoveWordTaskRepeat\"))" ]
[ "0.67053497", "0.638858", "0.6320744", "0.63052964", "0.609179", "0.60616803", "0.6032747", "0.6016889", "0.6012378", "0.5950096", "0.5946541", "0.5898837", "0.5846728", "0.582731", "0.58167356", "0.5800994", "0.5702433", "0.56634784", "0.56365335", "0.5624497", "0.5620596", "0.561177", "0.56022966", "0.5591492", "0.55777645", "0.5576623", "0.55760914", "0.55741197", "0.5571699", "0.5552457" ]
0.7538943
0
Parse the traceroute result
def parseTraceroute(self, stdoutputdata): itemlist = stdoutputdata.split("\n") res = defaultdict(list) for item in itemlist: re_ip = re.search(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', item) if re_ip: ip = re_ip.group(0) res["route"].append(ip) res["route"].append(self.task["destination"]) res["destination"] = self.task["destination"] return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_traceroute_output(self):\n url = self.source['url']\n if 'post_data' in self.source:\n context = self.source['post_data']\n else:\n context = None\n status_code, content = self.urlopen(url, context=context)\n content = content.strip()\n regex = r'<pre.*?>(?P<traceroute>.*?)</pre>'\n pattern = re.compile(regex, re.DOTALL | re.IGNORECASE)\n try:\n traceroute = re.findall(pattern, content)[0].strip()\n except IndexError:\n # Manually append closing </pre> for partially downloaded page\n content = \"{0}</pre>\".format(content)\n traceroute = re.findall(pattern, content)[0].strip()\n return (status_code, traceroute)", "def test_traceroute_osx_10_14_6(self):\n self.assertEqual(jc.parsers.traceroute.parse(self.osx_10_14_6_traceroute, quiet=True), self.osx_10_14_6_traceroute_json)", "def test_traceroute_centos_7_7(self):\n self.assertEqual(jc.parsers.traceroute.parse(self.centos_7_7_traceroute, quiet=True), self.centos_7_7_traceroute_json)", "def test_traceroute():\n ret = (\n \" 1 1 ms <1 ms <1 ms 172.27.104.1\\n\"\n \" 2 1 ms <1 ms 1 ms 121.242.35.1.s[121.242.35.1]\\n\"\n \" 3 3 ms 2 ms 2 ms 121.242.4.53.s[121.242.4.53]\\n\"\n )\n mock = MagicMock(return_value=ret)\n with patch.dict(win_network.__salt__, {\"cmd.run\": mock}):\n assert win_network.traceroute(\"google.com\") == [\n {\n \"count\": \"1\",\n \"hostname\": None,\n \"ip\": \"172.27.104.1\",\n \"ms1\": \"1\",\n \"ms2\": \"<1\",\n \"ms3\": \"<1\",\n },\n {\n \"count\": \"2\",\n \"hostname\": None,\n \"ip\": \"121.242.35.1.s[121.242.35.1]\",\n \"ms1\": \"1\",\n \"ms2\": \"<1\",\n \"ms3\": \"1\",\n },\n {\n \"count\": \"3\",\n \"hostname\": None,\n \"ip\": \"121.242.4.53.s[121.242.4.53]\",\n \"ms1\": \"3\",\n \"ms2\": \"2\",\n \"ms3\": \"2\",\n },\n ]", "def traceroute(host, unique_id=None, index=None, sourcetype=\"traceroute\",\n source=\"traceroute_search_command\", logger=None, include_dest_info=True,\n include_raw_output=False):\n\n if system_name().lower() == \"windows\":\n cmd = [\"tracert\"]\n else:\n cmd = [\"traceroute\"]\n\n # Add the host argument\n cmd.append(host)\n\n # Run the traceroute command and get the output\n output = None\n return_code = None\n\n try:\n output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n return_code = 0\n except subprocess.CalledProcessError as exception:\n output = exception.output\n return_code = exception.returncode\n except OSError as exception:\n if exception.errno == errno.ENOENT:\n raise CommandNotFoundException(cmd[0])\n else:\n raise exception\n\n # Parse the output\n try:\n trp = Traceroute.parse(output)\n\n # This will contain the hops\n parsed = []\n\n hop_idx = 0\n\n # Make an entry for each hop\n for hop in trp.hops:\n\n if hop.probes is None or len(hop.probes) == 0:\n continue\n\n hop_idx = hop_idx + 1\n\n # This will track the probes\n rtts = []\n ips = []\n names = []\n\n hop_dict = collections.OrderedDict()\n hop_dict['hop'] = hop_idx\n\n for probe in hop.probes:\n\n if probe.rtt is not None:\n rtts.append(str(probe.rtt))\n\n if probe.dest_ip is not None:\n ips.append(probe.dest_ip)\n\n if probe.dest is not None:\n names.append(probe.dest)\n\n hop_dict['rtt'] = rtts\n hop_dict['ip'] = ips\n hop_dict['name'] = names\n\n if include_dest_info:\n hop_dict['dest_ip'] = trp.dest_ip\n hop_dict['dest_host'] = trp.dest\n\n if include_raw_output:\n hop_dict['output'] = output\n\n parsed.append(hop_dict)\n\n except Exception:\n\n if logger:\n logger.exception(\"Unable to parse traceroute output\")\n\n raise Exception(\"Unable to parse traceroute output\")\n\n # Write 
the event as a stash new file\n if index is not None:\n writer = StashNewWriter(index=index, source_name=source, sourcetype=sourcetype,\n file_extension=\".stash_output\")\n\n # Let's store the basic information for the traceroute that will be included with each hop\n proto = collections.OrderedDict()\n\n # Include the destination info if it was included already\n if not include_dest_info:\n proto['dest_ip'] = trp.dest_ip\n proto['dest_host'] = trp.dest\n\n if unique_id is None:\n unique_id = binascii.b2a_hex(os.urandom(4))\n\n proto['unique_id'] = unique_id\n\n for parsed_hop in parsed:\n\n result = collections.OrderedDict()\n result.update(parsed_hop)\n result.update(proto)\n\n # Log that we performed the traceroute\n if logger:\n logger.debug(\"Wrote stash file=%s\", writer.write_event(result))\n\n return output, return_code, parsed", "def traceroute(self,dest):\n\t\tself.tn.write('traceroute %s\\n'%(dest))\n\t\tself.tn.write('exit\\n')\n\t\tresp = self.tn.read_all()\n\t\treturn resp", "def test_traceroute6_osx_10_14_6(self):\n self.assertEqual(jc.parsers.traceroute.parse(self.osx_10_14_6_traceroute6, quiet=True), self.osx_10_14_6_traceroute6_json)", "def parse_output(result):\n output = result['output']\n parsed = output.split('\\n')\n output = []\n for _line in parsed:\n output.append(_line.strip())\n log.debug(_line)\n return output", "def test_traceroute_n_ipv4(self):\n self.assertEqual(jc.parsers.traceroute.parse(self.generic_traceroute_n_ipv4, quiet=True), self.generic_traceroute_n_ipv4_json)", "def __run_traceroute(self):\n self.print_debug(\"ip_address={0}\".format(self.ip_address))\n\n filename = \"{0}.{1}.txt\".format(self.ip_address, self.country)\n filepath = os.path.join(self.tmp_dir, filename)\n\n if not os.path.exists(filepath):\n if self.country == \"LO\":\n status_code, traceroute = self.execute_cmd(self.source['url'])\n else:\n status_code, traceroute = self.get_traceroute_output()\n if status_code != 0 and status_code != 200:\n return {'error': status_code}\n open(filepath, \"w\").write(traceroute)\n traceroute = open(filepath, \"r\").read()\n\n self.raw_string = traceroute \n self.__get_hops(traceroute)\n\n\n #if not self.no_geo:\n # self.__get_geocoded_hops()\n\n #self.hops = map(lambda h: {h.pop(\"hop_num\") : h}, self.hops)", "def test_traceroute_noheader(self):\n self.assertEqual(jc.parsers.traceroute.parse(self.osx_10_14_6_traceroute_noheader, quiet=True), self.osx_10_14_6_traceroute_no_header_json)", "def parse_client_stdout(txt):\n r = Result.from_netperf_stdout(txt)\n return r", "def test_traceroute_n_q1_ipv4(self):\n self.assertEqual(jc.parsers.traceroute.parse(self.generic_traceroute_n_q1_ipv4, quiet=True), self.generic_traceroute_n_q1_ipv4_json)", "def test_traceroute_q_osx_10_14_6(self):\n self.assertEqual(jc.parsers.traceroute.parse(self.osx_10_14_6_traceroute_q, quiet=True), self.osx_10_14_6_traceroute_q_json)", "def test_traceroute_freebsd12(self):\n self.assertEqual(jc.parsers.traceroute.parse(self.freebsd12_traceroute, quiet=True), self.freebsd12_traceroute_json)", "def test_traceroute_a_osx_10_14_6(self):\n self.assertEqual(jc.parsers.traceroute.parse(self.osx_10_14_6_traceroute_asn, quiet=True), self.osx_10_14_6_traceroute_asn_json)", "def test_traceroute6_freebsd12(self):\n self.assertEqual(jc.parsers.traceroute.parse(self.freebsd12_traceroute6, quiet=True), self.freebsd12_traceroute6_json)", "def parse(res):\n res=res[0]\n steps = []\n if not isinstance(res, dict):\n return {}\n\n for step in res[\"legs\"][0][\"steps\"]:\n instruction = 
re.sub('<[^<]+?>', '', step[\"html_instructions\"])\n distance = step[\"distance\"][\"text\"]\n duration = step[\"duration\"][\"text\"]\n\n if step[\"travel_mode\"] == \"TRANSIT\":\n departure_stop = step[\"transit_details\"][\"departure_stop\"][\"name\"]\n arrival_stop = step[\"transit_details\"][\"arrival_stop\"][\"name\"]\n departure_time = step[\"transit_details\"][\"departure_time\"][\"text\"]\n arrival_time = step[\"transit_details\"][\"arrival_time\"][\"text\"]\n num_stops = step[\"transit_details\"][\"num_stops\"]\n bus_name = step[\"transit_details\"][\"headsign\"]\n\n steps.append({\n \"distance\": distance,\n \"duration\": duration,\n \"instruction\": instruction,\n \"bus_name\": bus_name,\n \"num_stops\": num_stops,\n \"arrival_time\": arrival_time,\n \"departure_time\": departure_time,\n \"departure_stop\": departure_stop,\n \"arrival_stop\": arrival_stop,\n \"travel_mode\": \"TRANSIT\"\n })\n else:\n substeps = []\n if \"steps\" in step:\n for step2 in step[\"steps\"]:\n instruction2 = re.sub('<[^<]+?>', '', step2[\"html_instructions\"])\n distance2 = step2[\"distance\"][\"text\"]\n duration2 = step2[\"duration\"][\"text\"]\n\n substeps.append({\n \"distance\": distance2,\n \"duration\": duration2,\n \"instruction\": instruction2\n })\n steps.append({\n \"distance\": distance,\n \"duration\": duration,\n \"instruction\": instruction,\n \"substeps\": substeps,\n \"travel_mode\": step[\"travel_mode\"]\n })\n\n return {\n \"arrival_time\": res[\"legs\"][0].get(\"arrival_time\", {}).get(\"text\", None),\n \"departure_time\": res[\"legs\"][0].get(\"departure_time\", {}).get(\"text\", None),\n \"end_address\": res[\"legs\"][0][\"end_address\"],\n \"start_address\": res[\"legs\"][0][\"start_address\"],\n \"distance\": res[\"legs\"][0][\"distance\"][\"text\"],\n \"duration\": res[\"legs\"][0][\"duration\"][\"text\"],\n \"steps\": steps,\n }", "def test_traceroute_mult_addresses_osx_10_14_6(self):\n self.assertEqual(jc.parsers.traceroute.parse(self.osx_10_14_6_traceroute_mult_addresses, quiet=True), self.osx_10_14_6_traceroute_mult_addresses_json)", "async def run(self):\n\n result = {'hops': [],\n 'start_timestamp': time()}\n\n if self.icmp:\n trace = await create_subprocess_exec(\"traceroute\",\n \"-n\",\n \"-I\",\n \"-w\" + self.wait_time,\n \"-m\" + self.max_hops,\n \"-q 1\",\n self.device,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n else:\n trace = await create_subprocess_exec(\"traceroute\",\n \"-n\",\n \"-w\" + self.wait_time,\n \"-m\" + self.max_hops,\n \"-q 1\",\n self.device,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n stdout = await trace.stdout.read()\n stderr = await trace.stderr.read()\n\n if stderr:\n result['error'] = stderr\n\n lines = stdout.splitlines()\n # remove first line \"traceroute to...\"\n del lines[0]\n\n for line in lines:\n line = line.decode('utf-8')\n ip_address = self.extract_ip_from_line(line)\n rtt = self.extract_rtt_from_line(line)\n if(ip_address):\n result['hops'].append({'ip_address': ip_address,\n 'rtt': rtt})\n elif '*' in line:\n result['hops'].append({'ip_address': '*',\n 'rtt': '*'})\n\n result['end_timestamp'] = time()\n self.results.append(result)", "def test_traceroute_nodata(self):\n self.assertEqual(jc.parsers.traceroute.parse('', quiet=True), {})", "def _parse_response(response):\n m = re.match(r\"^(?P<alias>[^\\s]*)\\s+(?P<resp>.*)$\", response)\n return m.group('alias'), m.group('resp')", "def test_traceroute6_mult_addresses_osx_10_14_6(self):\n 
self.assertEqual(jc.parsers.traceroute.parse(self.osx_10_14_6_traceroute6_mult_addresses, quiet=True), self.osx_10_14_6_traceroute6_mult_addresses_json)", "def _parse_ip_stats_link_show(raw_result):\n\n show_re = (\n r'.+?RX:.*?\\n'\n r'\\s*(?P<rx_bytes>\\d+)\\s+(?P<rx_packets>\\d+)\\s+(?P<rx_errors>\\d+)\\s+'\n r'(?P<rx_dropped>\\d+)\\s+(?P<rx_overrun>\\d+)\\s+(?P<rx_mcast>\\d+)'\n r'.+?TX:.*?\\n'\n r'\\s*(?P<tx_bytes>\\d+)\\s+(?P<tx_packets>\\d+)\\s+(?P<tx_errors>\\d+)\\s+'\n r'(?P<tx_dropped>\\d+)\\s+(?P<tx_carrier>\\d+)\\s+(?P<tx_collisions>\\d+)'\n )\n\n re_result = match(show_re, raw_result, DOTALL)\n result = None\n\n if (re_result):\n result = re_result.groupdict()\n for key, value in result.items():\n if value is not None:\n if value.isdigit():\n result[key] = int(value)\n\n return result", "def parse_directions_response(directions_response):\n if directions_response:\n route_response = directions_response[0]\n route_points = []\n route_distances = []\n\n legs = route_response[\"legs\"]\n first_point = (legs[0][\"steps\"][0][\"start_location\"][\"lat\"],\n legs[0][\"steps\"][0][\"start_location\"][\"lng\"])\n route_points.append(first_point)\n\n for leg in legs:\n for step in leg[\"steps\"]:\n new_point = (step[\"end_location\"][\"lat\"],\n step[\"end_location\"][\"lng\"])\n new_distance = step[\"distance\"][\"value\"] # distance from step's start to end in meters\n route_points.append(new_point)\n route_distances.append(new_distance)\n\n return (route_points, route_distances)\n\n else:\n raise ValueError(\"no route between start and end, try new points\")", "def parseResults(result):\n # Split the results based on newline characters\n results_cut = result.text.split('\\n')[12:-49]\n # Initialize lists of the values to be parsed from results_cut \n visit_id = []\n name = []\n ra_hour = []\n ra_min = []\n ra_sec = []\n dec_deg = []\n dec_min = []\n dec_sec = []\n v_mag = []\n ra_motion = []\n dec_motion = []\n # Iterate through results_cut and append them to the respective lists\n for line in results_cut:\n visit_id.append(int(line[6:12]))\n name.append(line[12:36])\n ra_hour.append(int(line[38:40]))\n ra_min.append(int(line[41:43]))\n ra_sec.append(float(line[44:48]))\n dec_deg.append(int(line[49:52]))\n dec_min.append(int(line[53:55]))\n dec_sec.append(int(line[56:58]))\n try:\n v_mag.append(float(line[60:64]))\n except ValueError:\n # If there is no reported v_mag for the object, return -99\n v_mag.append(-99.0)\n ra_motion.append('%s%i' % (line[84], int(line[82:84])))\n dec_motion.append('%s%i' % (line[91], int(line[89:91])))\n # Initialize the pandas dataframe to be returned\n results_df = pd.DataFrame(np.array([visit_id, name, ra_hour, ra_min, ra_sec, \n dec_deg, dec_min, dec_sec, v_mag, \n ra_motion, dec_motion]).T, \n columns=['visit_id', 'name', 'ra_hour', 'ra_min', 'ra_sec', \n 'dec_deg', 'dec_min', 'dec_sec', 'v_mag', \n 'ra_motion', 'dec_motion'])\n # Add the lists to the dataframe\n results_df['visit_id'] = pd.to_numeric(results_df['visit_id'])\n results_df['ra_hour'] = pd.to_numeric(results_df['ra_hour'])\n results_df['ra_min'] = pd.to_numeric(results_df['ra_min'])\n results_df['ra_sec'] = pd.to_numeric(results_df['ra_sec'])\n results_df['dec_deg'] = pd.to_numeric(results_df['dec_deg'])\n results_df['dec_min'] = pd.to_numeric(results_df['dec_min'])\n results_df['dec_sec'] = pd.to_numeric(results_df['dec_sec'])\n results_df['v_mag'] = pd.to_numeric(results_df['v_mag'])\n results_df['ra_motion'] = pd.to_numeric(results_df['ra_motion'])\n results_df['dec_motion'] = 
pd.to_numeric(results_df['dec_motion'])\n \n return results_df", "def parse_snmp_response(response, type):\n values = []\n root = etree.fromstring(response)\n body = root.findall('{%s}Body'%'http://schemas.xmlsoap.org/soap/envelope/')\n for b in body:\n message = b.findall('{%s}message'%'http://ggf.org/ns/nmwg/base/2.0/')\n for m in message:\n data = m.findall('{%s}data'%'http://ggf.org/ns/nmwg/base/2.0/')\n for d in data:\n datum = d.findall('{%s}datum'%'http://ggf.org/ns/nmwg/base/2.0/')\n for d2 in datum:\n #to check this is not an error message\n if d2.text != '':\n if d2.attrib['value'] != '' and d2.attrib['value'] != None and d2.attrib['value'] != 'nan':\n v = {}\n v['timeValue'] = datetime.fromtimestamp(float(d2.attrib['timeValue']))\n v['value']=d2.attrib['value']\n if type!=\"lamp\":\n v['valueUnits'] = d2.attrib['valueUnits']\n values.append(v)\n\n return values", "def _result_to_dict(line):\n f = line.split(':;')\n return {'server': f[0], 'os_name': f[1], 'status': f[2], 'ipv4': f[3]}", "def parse_response(response):\n # a result should always have a status\n status = response['status']\n\n # a result _may_ have a results or a reason\n result = response.get('results', [])\n reason = response.get('reason', None)\n\n return status, result, reason", "def _handle_result(result: 'Request'):\n for route in result.routes:\n if route.executor == GATEWAY_NAME:\n route.end_time.GetCurrentTime()\n\n self._update_end_request_metrics(result)\n\n return result" ]
[ "0.6905173", "0.6265325", "0.6184163", "0.6166134", "0.60980153", "0.6027709", "0.6013491", "0.59370697", "0.5883287", "0.5868269", "0.5859081", "0.58563274", "0.5829786", "0.5819288", "0.5764864", "0.56720495", "0.5631726", "0.5578696", "0.5578418", "0.5531463", "0.5482655", "0.5462603", "0.54329467", "0.54140913", "0.5375431", "0.53579706", "0.53420764", "0.5329489", "0.5325534", "0.53243303" ]
0.7362002
0
Symmetric decorrelation i.e. W <- (W W.T) ^{-1/2} W
def _sym_decorrelation(W): s, u = linalg.eigh(np.dot(W, W.T)) # Avoid sqrt of negative values because of rounding errors. Note that # np.sqrt(tiny) is larger than tiny and therefore this clipping also # prevents division by zero in the next step. s = np.clip(s, a_min=np.finfo(W.dtype).tiny, a_max=None) # u (resp. s) contains the eigenvectors (resp. square roots of # the eigenvalues) of W * W.T return np.linalg.multi_dot([u * (1.0 / np.sqrt(s)), u.T, W])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _gs_decorrelation(w, W, j):\n w -= np.linalg.multi_dot([w, W[:j].T, W[:j]])\n return w", "def _symmetric(updates):\n sym_updates = updates[:-1] + [updates[-1]] + updates[:-1][::-1]\n coeff = [0.5]*(len(updates)-1) + [1.0] + [0.5]*(len(updates) - 1)\n return ExplicitIntegrator(coeff, sym_updates)", "def dcweights(x):\n\n #Form the vanderMonde matrix:\n A=np.vander(x).T\n A=A[::-1,:]\n F=0*A\n n=snp.arange(len(x))+1\n for i in range(len(x)-1):\n a=x[i]; b=x[i+1]\n f=(b**n-a**n)/n\n F[:,i]=f\n w=snp.solve(A,F)\n\n return w[:,:-1]", "def weight_symmetry(a, b):\n return 1 - (np.abs(a - b) / (a + b))", "def determinant(v,w):\n return v[0] * w[1] - v[1] * w[0]", "def dtw(ts1, ts2, derivative=False):\n s = ts1\n t = ts2\n\n if derivative:\n tmp_ts1 = []\n tmp_ts2 = []\n for i in range(len(ts1) - 1):\n tmp_ts1.append(ts1[i + 1] - ts1[i])\n tmp_ts2.append(ts2[i + 1] - ts2[i])\n s = tmp_ts1\n t = tmp_ts2\n\n n, m = len(s), len(t)\n dtw_matrix = np.zeros((n + 1, m + 1))\n for i in range(n + 1):\n for j in range(m + 1):\n dtw_matrix[i, j] = np.inf\n dtw_matrix[0, 0] = 0\n\n for i in range(1, n + 1):\n for j in range(1, m + 1):\n cost = abs(s[i - 1] - t[j - 1])\n # take last min from a square box\n last_min = np.min([dtw_matrix[i - 1, j], dtw_matrix[i, j - 1], dtw_matrix[i - 1, j - 1]])\n dtw_matrix[i, j] = cost + last_min\n return dtw_matrix[-1][-1]", "def vincdecnorm(arr):\n tmp = convert(arr, GEOCENTRIC_CARTESIAN, GEOCENTRIC_SPHERICAL)\n return -tmp[..., 0], tmp[..., 1], tmp[..., 2]", "def test_deconvolve_symmetric(self):\n tau = 50.0\n tau_deconv1 = 5.0\n tau_deconv2 = 20.0\n mrate = 50.0\n Mrate = 100.0\n\n tmax = 60.0\n dt = 0.1\n\n self.rule.tau = tau\n self.rule.min_rate = mrate\n self.rule.max_rate = Mrate\n self.rule.compress_rates = False\n self.rule.tau_deconv1 = tau_deconv1\n self.rule.tau_deconv2 = tau_deconv2\n\n self.motor.error_fct = lambda t: 2*np.sin(0.123 + t/15.0)*np.ones(self.Nsrc)\n\n M1 = simulation.StateMonitor(self.rule, 'out')\n\n sim1 = simulation.Simulation(self.source, self.motor, self.rule, M1, dt=dt)\n sim1.run(tmax)\n\n self.rule.tau_deconv1 = tau_deconv2\n self.rule.tau_deconv2 = tau_deconv1\n\n M2 = simulation.StateMonitor(self.rule, 'out')\n\n sim2 = simulation.Simulation(self.source, self.motor, self.rule, M2, dt=dt)\n sim2.run(tmax)\n\n self.assertTrue(np.allclose(M1.out, M2.out))", "def test_deconvolve_once_symmetric(self):\n tau = 50.0\n tau_deconv = 20.0\n mrate = 50.0\n Mrate = 100.0\n\n tmax = 60.0\n dt = 0.1\n\n self.rule.tau = tau\n self.rule.min_rate = mrate\n self.rule.max_rate = Mrate\n self.rule.compress_rates = False\n self.rule.tau_deconv1 = tau_deconv\n self.rule.tau_deconv2 = None\n\n self.motor.error_fct = lambda t: (int_r(t/20.0)%3-1)*np.ones(self.Nsrc)\n\n M1 = simulation.StateMonitor(self.rule, 'out')\n\n sim1 = simulation.Simulation(self.source, self.motor, self.rule, M1, dt=dt)\n sim1.run(tmax)\n\n self.rule.tau_deconv1 = None\n self.rule.tau_deconv2 = tau_deconv\n\n M2 = simulation.StateMonitor(self.rule, 'out')\n\n sim2 = simulation.Simulation(self.source, self.motor, self.rule, M2, dt=dt)\n sim2.run(tmax)\n\n self.assertTrue(np.allclose(M1.out, M2.out))", "def symmetric_diff(a,b):\n return a ^ b", "def ws06(adp1, adp2):\n # print sum(adp1[:3])/3. 
- sum(adp2[:3])/3.\n adp1 = get_matrix(adp1)\n adp2 = get_matrix(adp2)\n adp1i = np.linalg.inv(adp1)\n adp2i = np.linalg.inv(adp2)\n a = 2 ** 1.5\n b = np.dot(adp1i, adp2i)\n c = np.linalg.det(b)\n\n # if c <= 0:\n # c *= -1\n d = c ** 0.25\n up = a * d\n\n x = adp1i + adp2i\n y = np.linalg.det(x)\n # if y <= 0:\n # y *= -1\n z = y ** 0.5\n R = up / z\n return 100 * (1 - R)", "def darcy_func(self):\n i, o = self.inl[0].to_flow(), self.outl[0].to_flow()\n\n if abs(i[0]) < 1e-4:\n return i[1] - o[1]\n\n visc_i = visc_mix_ph(i, T0=self.inl[0].T.val_SI)\n visc_o = visc_mix_ph(o, T0=self.outl[0].T.val_SI)\n v_i = v_mix_ph(i, T0=self.inl[0].T.val_SI)\n v_o = v_mix_ph(o, T0=self.outl[0].T.val_SI)\n\n re = 4 * abs(i[0]) / (np.pi * self.D.val * (visc_i + visc_o) / 2)\n\n return ((i[1] - o[1]) - 8 * abs(i[0]) * i[0] * (v_i + v_o) / 2 *\n self.L.val * lamb(re, self.ks.val, self.D.val) /\n (np.pi ** 2 * self.D.val ** 5))", "def computeW(self):\n E = np.where(self.v > 0, 1, -1)\n # theshold the connections to only -1,1\n binary_weights = np.where(self.c > 0, 1, self.c)\n binary_weights = np.where(binary_weights < 0, -1, binary_weights)\n W = np.sum(binary_weights * np.dot(E.reshape(-1,1), E.reshape(1,-1))) # W = C * E * E\n self.W = W\n if np.sum(binary_weights) != 0:\n self.W = self.W / np.sum(binary_weights) # W / W*\n return self.W", "def nac_w_variance(r):\n if (r == 0):\n return 0\n else:\n return (1 - np.tanh(r) / r) * (r - np.tanh(r / 2)) * (1 / (2 * r))", "def _dy(self, T):\n return self._h(np.diff(T)) * self._a / self._m / self._c * np.diff(T) * np.array([1, -1])", "def eq(w, x):\n return (-w[1]*x - w[0]) / w[2]", "def DC(s,theta=0,grid='eq',num_corr=None):\n if num_corr is None:\n num_corr = s\n\n # Choose the grid:\n if grid=='eq':\n t=snp.arange(s+1)/s # Equispaced\n elif grid=='cheb':\n t=0.5*(np.cos(np.arange(0,s+1)*np.pi/s)+1.) 
#Chebyshev\n t=t[::-1]\n\n dt=np.diff(t)\n\n alpha=snp.zeros([s*(num_corr+1)+1,s*(num_corr+1)])\n beta=snp.zeros([s*(num_corr+1)+1,s*(num_corr+1)])\n\n w=dcweights(t) #Get the quadrature weights for our grid\n #w[i,j] is the weight of node i for the integral\n #over [x_j,x_j+1]\n\n #first iteration (k=1)\n for i in range(1,s+1):\n alpha[i,i-1] = 1\n beta[i ,i-1] = dt[i-1]\n\n #subsequent iterations:\n for k in range(1,num_corr+1):\n beta[s*k+1,0]=w[0,0]\n for i in range(1,s+1):\n alpha[s*k+1,0]=1\n beta[s*k+1,s*(k-1)+i]=w[i,0]\n\n for m in range(1,s):\n alpha[s*k+m+1,s*k+m] = 1\n beta[s*k+m+1,s*k+m] = theta*dt[m]\n beta[s*k+m+1,0]=w[0,m]\n for i in range(1,s+1):\n beta[s*k+m+1,s*(k-1)+i]=w[i,m]\n if i==m:\n beta[s*k+m+1,s*(k-1)+i]-=theta*dt[m]\n\n name='Deferred correction method of order '+str(s+1)\n return ExplicitRungeKuttaMethod(alpha=alpha,beta=beta,name=name,order=s+1).dj_reduce()", "def _adj(w):\r\n return (w[:d * d] - w[d * d:]).reshape([d, d])", "def periodic_corr(x, y):\r\n return np.fft.ifft(np.fft.fft(x) * np.fft.fft(y).conj()).real", "def transverse_resonator(Rs, Q, wr, w):\n Rs = _np.array(Rs,ndmin=1,dtype=float)[:,None] # I am using broadcasting\n Q = _np.array(Q, ndmin=1,dtype=float)[:,None]\n wr = _np.array(wr,ndmin=1,dtype=float)[:,None]\n Zt = wr*Rs/(w + 1j*Q*(wr - w**2/wr))\n return Zt.sum(0).flatten()", "def corr_deriv(inp0, inp1, flag: bool = False):\n\n # 1st derivative\n tx1 = inp0.time.data.astype(int) * 1e-9\n inp0 = inp0.data\n dtx1 = tx1[:-1] + 0.5 * np.diff(tx1)\n dx1 = np.diff(inp0)\n\n tx2 = inp1.time.data.astype(int) * 1e-9\n inp1 = inp1.data\n dtx2 = tx2[:-1] + 0.5 * np.diff(tx2)\n dx2 = np.diff(inp1)\n\n ind_zeros1 = np.where(np.sign(dx1[:-1] * dx1[1:]) < 0)[0]\n if ind_zeros1 == 0:\n ind_zeros1 = ind_zeros1[1:]\n\n ind_zeros2 = np.where(np.sign(dx2[:-1] * dx2[1:]) < 0)[0]\n if ind_zeros2 == 0:\n ind_zeros2 = ind_zeros2[1:]\n\n ind_zeros1_p = np.where(dx1[ind_zeros1 - 1] - dx1[ind_zeros1] > 0)[0]\n ind_zeros2_p = np.where(dx2[ind_zeros2 - 1] - dx2[ind_zeros2] > 0)[0]\n\n ind_zeros1_m = np.where(dx1[ind_zeros1 - 1] - dx1[ind_zeros1] < 0)[0]\n ind_zeros2_m = np.where(dx2[ind_zeros2 - 1] - dx2[ind_zeros2] < 0)[0]\n\n ind1_p = ind_zeros1[ind_zeros1_p]\n ind1_m = ind_zeros1[ind_zeros1_m]\n\n t_zeros1_p = dtx1[ind1_p] + (dtx1[ind1_p + 1] - dtx1[ind1_p]) / (\n 1 + np.abs(dx1[ind1_p + 1]) / np.abs(dx1[ind1_p]))\n t_zeros1_m = dtx1[ind1_m] + (dtx1[ind1_m + 1] - dtx1[ind1_m]) / (\n 1 + np.abs(dx1[ind1_m + 1]) / np.abs(dx1[ind1_m]))\n\n ind2_p = ind_zeros2[ind_zeros2_p]\n ind2_m = ind_zeros2[ind_zeros2_m]\n\n t_zeros2_p = dtx2[ind2_p] + (dtx2[ind2_p + 1] - dtx2[ind2_p]) / (\n 1 + np.abs(dx2[ind2_p + 1]) / np.abs(dx2[ind2_p]))\n t_zeros2_m = dtx2[ind2_m] + (dtx2[ind2_m + 1] - dtx2[ind2_m]) / (\n 1 + np.abs(dx2[ind2_m + 1]) / np.abs(dx2[ind2_m]))\n\n # Remove repeating points\n t_zeros1_p = np.delete(t_zeros1_p, np.where(np.diff(t_zeros1_p) == 0)[0])\n t_zeros2_p = np.delete(t_zeros2_p, np.where(np.diff(t_zeros2_p) == 0)[0])\n\n # Define identical pairs of two time axis\n t1_d_p, t2_d_p, _, _ = find_closest(t_zeros1_p, t_zeros2_p)\n t1_d_m, t2_d_m, _, _ = find_closest(t_zeros1_m, t_zeros2_m)\n\n t1_d = np.vstack([t1_d_p, t1_d_m])\n t1_d = t1_d[t1_d[:, 0].argsort(), 0]\n\n t2_d = np.vstack([t2_d_p, t2_d_m])\n t2_d = t2_d[t2_d[:, 0].argsort(), 0]\n\n if flag:\n # zero crossings\n ind_zeros1 = np.where(np.sign(inp0[:-1] * inp0[1:]) < 0)[0]\n ind_zeros2 = np.where(np.sign(inp1[:-1] * inp1[1:]) < 0)[0]\n\n ind_zeros1 = np.delete(ind_zeros1, np.where(ind_zeros1 == 1)[0])\n 
ind_zeros2 = np.delete(ind_zeros2, np.where(ind_zeros2 == 1)[0])\n\n ind_zeros1_p = np.where(inp0[ind_zeros1 - 1] - inp0[ind_zeros1] > 0)[0]\n ind_zeros2_p = np.where(inp1[ind_zeros2 - 1] - inp1[ind_zeros2] > 0)[0]\n\n ind_zeros1_m = np.where(inp0[ind_zeros1 - 1] - inp0[ind_zeros1] < 0)[0]\n ind_zeros2_m = np.where(inp1[ind_zeros2 - 1] - inp1[ind_zeros2] < 0)[0]\n\n ind1_p = ind_zeros1[ind_zeros1_p]\n ind1_m = ind_zeros1[ind_zeros1_m]\n\n t_zeros1_p = tx1[ind1_p] + (tx1[ind1_p + 1] - tx1[ind1_p]) / (\n 1 + np.abs(inp0[ind1_p + 1]) / np.abs(inp0[ind1_p]))\n t_zeros1_m = tx1[ind1_m] + (tx1[ind1_m + 1] - tx1[ind1_m]) / (\n 1 + np.abs(inp0[ind1_m + 1]) / np.abs(inp0[ind1_m]))\n\n ind2_p = ind_zeros2[ind_zeros2_p]\n ind2_m = ind_zeros2[ind_zeros2_m]\n\n t_zeros2_p = tx2[ind2_p] + (tx2[ind2_p + 1] - tx2[ind2_p]) / (\n 1 + np.abs(inp1[ind2_p + 1]) / np.abs(inp1[ind2_p]))\n t_zeros2_m = tx2[ind2_m] + (tx2[ind2_m + 1] - tx2[ind2_m]) / (\n 1 + np.abs(inp1[ind2_m + 1]) / np.abs(inp1[ind2_m]))\n\n else:\n # 2nd derivative\n dd_tx1 = dtx1[:-1] + 0.5 * np.diff(dtx1)\n ddx1 = np.diff(dx1)\n\n dd_tx2 = dtx2[:-1] + 0.5 * np.diff(dtx2)\n ddx2 = np.diff(dx2)\n\n ind_zeros1 = np.where(np.sign(ddx1[:-1] * ddx1[1:]) < 0)[0]\n ind_zeros2 = np.where(np.sign(ddx2[:-1] * ddx2[1:]) < 0)[0]\n\n ind_zeros1 = np.delete(ind_zeros1, np.where(ind_zeros1 == 1)[0])\n ind_zeros2 = np.delete(ind_zeros2, np.where(ind_zeros2 == 1)[0])\n\n ind_zeros1_p = np.where(ddx1[ind_zeros1 - 1] - ddx1[ind_zeros1] > 0)[0]\n ind_zeros2_p = np.where(ddx2[ind_zeros2 - 1] - ddx2[ind_zeros2] > 0)[0]\n\n ind_zeros1_m = np.where(ddx1[ind_zeros1 - 1] - ddx1[ind_zeros1] < 0)[0]\n ind_zeros2_m = np.where(ddx2[ind_zeros2 - 1] - ddx2[ind_zeros2] < 0)[0]\n\n ind1_p = ind_zeros1[ind_zeros1_p]\n ind1_m = ind_zeros1[ind_zeros1_m]\n\n t_zeros1_p = dd_tx1[ind1_p] + (dd_tx1[ind1_p + 1] - dd_tx1[ind1_p]) / (\n 1 + np.abs(ddx1[ind1_p + 1]) / np.abs(ddx1[ind1_p]))\n t_zeros1_m = dd_tx1[ind1_m] + (dd_tx1[ind1_m + 1] - dd_tx1[ind1_m]) / (\n 1 + np.abs(ddx1[ind1_m + 1]) / np.abs(ddx1[ind1_m]))\n\n ind2_p = ind_zeros2[ind_zeros2_p]\n ind2_m = ind_zeros2[ind_zeros2_m]\n\n t_zeros2_p = dd_tx2[ind2_p] + (dd_tx2[ind2_p + 1] - dd_tx2[ind2_p]) / (\n 1 + np.abs(ddx2[ind2_p + 1]) / np.abs(ddx2[ind2_p]))\n t_zeros2_m = dd_tx2[ind2_m] + (dd_tx2[ind2_m + 1] - dd_tx2[ind2_m]) / (\n 1 + np.abs(ddx2[ind2_m + 1]) / np.abs(ddx2[ind2_m]))\n\n # Define identical pairs of two time axis\n t1_dd_p, t2_dd_p, _, _ = find_closest(t_zeros1_p, t_zeros2_p)\n t1_dd_m, t2_dd_m, _, _ = find_closest(t_zeros1_m, t_zeros2_m)\n\n t1_dd = np.vstack([t1_dd_p, t1_dd_m])\n t1_dd = t1_dd[t1_dd[:, 0].argsort(), 0]\n\n t2_dd = np.vstack([t2_dd_p, t2_dd_m])\n t2_dd = t2_dd[t2_dd[:, 0].argsort(), 0]\n\n return t1_d, t2_d, t1_dd, t2_dd", "def backward_committor_sensitivity(T, A, B, index):\n\n # This is really ugly to compute. 
The problem is, that changes in T induce changes in\n # the stationary distribution and so we need to add this influence, too\n # I implemented something which is correct, but don't ask me about the derivation\n\n n = len(T)\n\n trT = numpy.transpose(T)\n\n one = numpy.ones(n)\n eq = stationary_distribution(T)\n\n mEQ = numpy.diag(eq)\n mIEQ = numpy.diag(1.0 / eq)\n mSEQ = numpy.diag(1.0 / eq / eq)\n\n backT = numpy.dot(mIEQ, numpy.dot(trT, mEQ))\n\n qMat = forward_committor_sensitivity(backT, A, B, index)\n\n matA = trT - numpy.identity(n)\n matA = numpy.concatenate((matA, [one]))\n\n phiM = numpy.linalg.pinv(matA)\n\n phiM = phiM[:, 0:n]\n\n trQMat = numpy.transpose(qMat)\n\n d1 = numpy.dot(mSEQ, numpy.diagonal(numpy.dot(numpy.dot(trT, mEQ), trQMat), 0))\n d2 = numpy.diagonal(numpy.dot(numpy.dot(trQMat, mIEQ), trT), 0)\n\n psi1 = numpy.dot(d1, phiM)\n psi2 = numpy.dot(-d2, phiM)\n\n v1 = psi1 - one * numpy.dot(psi1, eq)\n v3 = psi2 - one * numpy.dot(psi2, eq)\n\n part1 = numpy.outer(eq, v1)\n part2 = numpy.dot(numpy.dot(mEQ, trQMat), mIEQ)\n part3 = numpy.outer(eq, v3)\n\n sensitivity = part1 + part2 + part3\n\n return sensitivity", "def sym_epipolar_dist(corr, F):\n corrs_temp = np.zeros(4)\n corrs_temp[1] = corr[0]\n corrs_temp[0] = corr[1]\n corrs_temp[2] = corr[3]\n corrs_temp[3] = corr[2]\n corr = corrs_temp\n p1 = np.hstack([corr[:2],1])\n p2 = np.hstack([corr[2:],1])\n first_term = (F @ p1)[:-1]\n second_term = (F.T @ p2)[:-1]\n coeff = (p2.T @ F @ p1)**2\n\n return coeff * (1/(np.linalg.norm(first_term)**2) + 1/(np.linalg.norm(second_term)**2))", "def _corr_kw(n):\r\n return n ** 3 - n", "def sym_adj(adj):\n adj = sp.coo_matrix(adj)\n rowsum = np.array(adj.sum(1))\n d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).astype(np.float32).todense()", "def RSS(X,Y,w):\n v = Y[:,0]- (np.dot(X,w[1:]) + w[0])\n return np.dot(v,v)", "def test_decompose(self, tol):\n r = 0.543\n phi = 0.123\n S = symplectic.two_mode_squeezing(r, phi)\n\n # test that S = B^\\dagger(pi/4, 0) [S(z) x S(-z)] B(pi/4)\n # fmt:off\n B = np.array([[1, -1, 0, 0], [1, 1, 0, 0], [0, 0, 1, -1], [0, 0, 1, 1]])/np.sqrt(2)\n\n Sq1 = np.array([[np.cosh(r)-np.cos(phi)*np.sinh(r), -np.sin(phi)*np.sinh(r)],\n [-np.sin(phi)*np.sinh(r), np.cosh(r)+np.cos(phi)*np.sinh(r)]])\n\n Sq2 = np.array([[np.cosh(-r)-np.cos(phi)*np.sinh(-r), -np.sin(phi)*np.sinh(-r)],\n [-np.sin(phi)*np.sinh(-r), np.cosh(-r)+np.cos(phi)*np.sinh(-r)]])\n # fmt:on\n\n Sz = block_diag(Sq1, Sq2)[:, [0, 2, 1, 3]][[0, 2, 1, 3]]\n expected = B.conj().T @ Sz @ B\n assert np.allclose(S, expected, atol=tol, rtol=0)", "def pwc_symmetric(t, params):\n t_bin_start = tf.cast(params['t_bin_end'].get_value(), dtype=tf.float64)\n t_bin_end = tf.cast(params['t_bin_start'].get_value(), dtype=tf.float64)\n t_final = tf.cast(params['t_final'].get_value(), dtype=tf.float64)\n inphase = tf.cast(params['inphase'].get_value(), dtype=tf.float64)\n\n t_interp = tf.where(tf.greater(t, t_final / 2), - t + t_final , t)\n shape = tf.reshape(\n tfp.math.interp_regular_1d_grid(t_interp, t_bin_start, t_bin_end, inphase, fill_value_below=0, fill_value_above=0), [len(t)])\n\n return shape", "def deconv(Y0, X0, lag, pord=1, dord=1, snr2=None, clen2=None, dspl=1, sidx=0, ntrn=None, vthresh=0., cdim=None, Nexp=0, vreg=1e-8, polytrend=False, smth=False):\n assert X0.ndim == Y0.ndim == 2\n assert X0.shape[1] == Y0.shape[1]\n assert pord >= dord\n # if pord>1 
or dord>1:\n # raise ValueError('pord>1 or dord>1 not supported!')\n\n Nt = X0.shape[1] # length of observations\n\n # external input\n if dord>0:\n if smth:\n X1 = Tools.KZ_filter(X0.T, 24, 1, method=\"mean\", causal=False).T\n Y1 = Tools.KZ_filter(Y0.T, 24, 1, method=\"mean\", causal=False).T\n else:\n X1, Y1 = X0, Y0\n\n dX = np.zeros_like(X0) * np.nan; dX[:,dord:] = np.diff(X1, dord, axis=-1)\n dY = np.zeros_like(Y0) * np.nan; dY[:,dord:] = np.diff(Y1, dord, axis=-1)\n # or:\n # dX = Tools.sdiff(X0, dsp, axis=-1)\n # dY = Tools.sdiff(Y0, dsp, axis=-1)\n else:\n dX, dY = X0, Y0\n\n Xvar0 = Tools.mts_cumview(dX, lag) # cumulative view for convolution\n # polynominal trend\n # division by Nt and multiplication by 10: normalization for numerical stability\n # *100 or *1 numerically works worse\n Xvar1 = Tools.dpvander(np.arange(Nt)/Nt*10, pord, dord)\n Xvar = np.vstack([Xvar0, Xvar1[:-1,:]]) #[:-1,:] removes the constant trend which may cause non-invertible covariance matrix. If the constant trend is kept here, Yprd at the end of this function should be modified accordingly like this:\n # Amat0 = Amat[:, :-(pord-dord+1)] ...\n Yvar = dY\n\n # construct the covariance matrix of the Gaussian process\n if clen2 is not None and clen2 > 0 and snr2 is not None and snr2 >= 0:\n if dord > 0:\n W0 = _dgp_cov_matrix(Nt, snr2, clen2)\n if dord > 1:\n warnings.warn(\"The current implementation of the GP covariance matrix is not exact for dord>1.\")\n else:\n W0 = _gp_cov_matrix(Nt, snr2, clen2)\n else:\n W0 = None # invalid parameters, equivalent to W0=np.eye(Nt)\n\n # prepare regressor\n regressor = Stat.dim_reduction_pca(Stat.random_subset(Stat.multi_linear_regression))\n # regressor = dim_reduction_cca(random_subset(multi_linear_regression)) # not recommended\n # regressor = random_subset(dim_reduction_pca(multi_linear_regression))\n # regressor = dim_reduction_pca(random_subset(percentile_subset(multi_linear_regression)))\n\n # training data\n (tidx0, tidx1), _ = Stat.training_period(Nt, tidx0=sidx, ntrn=ntrn) # valid training period\n Xtrn, Ytrn = Xvar[:,tidx0:tidx1:dspl], Yvar[:,tidx0:tidx1:dspl] # down-sampling of training data\n # GLS matrix\n if W0 is not None :\n Winv = la.inv(W0[tidx0:tidx1:dspl,:][:,tidx0:tidx1:dspl])\n else:\n Winv = None # equivalent to np.eye(Xtrn.shape[1])\n\n # regresion\n # method (\"mean\" or \"median\") used in random_subset is active only when Nexp>0\n # corrflag=False\n # corrflag (bool): if True use the correlation matrix for dimension reduction\n # ((Amat,Amatc), Cvec, _, _), toto = regressor(Ytrn, Xtrn, Winv, vthresh=vthresh, corrflag=corrflag, Nexp=Nexp, method=\"mean\")\n (Amat, Cvec, *_), (Amatc, *_) = regressor(Ytrn, Xtrn, Winv, vthresh=vthresh, cdim=cdim, Nexp=Nexp, method=\"mean\", vreg=vreg)\n Err = Yvar - (Amat @ Xvar + Cvec) # differential residual\n Sig = Stat.cov(Err, Err) # covariance matrix\n Amat0 = Amat[:, :Amat.shape[-1]-(pord-dord)] # kernel matrix corresponding to the external input X(t) only, without polynomial trend\n # Amat0 = Amat[:, :-(pord-dord)] if pord-dord > 0 else Amat\n # if kthresh>0:\n # Amat[np.abs(Amat)<kthresh] = 0\n\n # prediction\n Xcmv0 = Tools.mts_cumview(X0, lag)\n if polytrend: # with the polynomial trend, ie: return A*X(t) + P(t)\n # polynominal trend\n Xcmv1 = Tools.dpvander(np.arange(Nt)/Nt, pord, 0)\n Xcmv = np.vstack([Xcmv0, Xcmv1[:(pord-dord+1),:]])\n # Xcmv[np.isnan(Xcmv)] = 0 # Remove nans will introduce large values around discontinuties\n Yflt = np.hstack([Amat, Cvec]) @ Xcmv\n else: # without the polynomial 
trend, ie: return A*X(t)\n Yflt = Amat0 @ Xcmv0\n\n # Yprd = Yflt\n if dord > 0:\n Yprd = Yflt - Tools.polyprojection(Yflt, deg=dord-1, axis=-1) # projection \\Psi^\\dagger \\Psi\n else:\n Yprd = Yflt\n\n return Yprd, Amat, Amatc", "def test_cdtw(self):\n np.random.seed(1)\n M = 100\n N = 150\n t1 = np.linspace(0, 1, M)\n X = np.zeros((M, 2), dtype=np.float32)\n X[:, 0] = np.cos(2*np.pi*t1)\n X[:, 1] = np.sin(8*np.pi*t1)\n ## Sample an element from a dictionary of parameterizations\n ## and use this parameterization to interpolate the original\n ## time series\n D = linmdtw.alignmenttools.get_parameterization_dict(N)\n s = linmdtw.alignmenttools.sample_parameterization_dict(D, 4)\n Y = linmdtw.alignmenttools.get_interpolated_euclidean_timeseries(X, s)\n\n cost10 = linmdtw.get_path_cost(X, Y, linmdtw.cdtw(X, Y, 10))\n cost10_T = linmdtw.get_path_cost(Y, X, linmdtw.cdtw(Y, X, 10))\n assert(cost10 == cost10_T)\n cost4 = linmdtw.get_path_cost(X, Y, linmdtw.cdtw(X, Y, 4))\n cost4_T = linmdtw.get_path_cost(Y, X, linmdtw.cdtw(Y, X, 4))\n assert(cost4 == cost4_T)\n assert(cost10 < cost4)\n assert(cost10_T < cost4_T)" ]
[ "0.68297875", "0.58642405", "0.5714483", "0.56672794", "0.56156206", "0.55587417", "0.5539916", "0.55214864", "0.54957104", "0.548836", "0.54881096", "0.5487721", "0.54838127", "0.54819167", "0.5476712", "0.54497755", "0.54356843", "0.5432515", "0.5406844", "0.53943413", "0.53797656", "0.53707796", "0.53483963", "0.5339923", "0.5324056", "0.5314048", "0.53126323", "0.53098845", "0.5296419", "0.5284734" ]
0.7474815
0
Grab the name of the binary we're running in.
def get_binary_name(): return os.path.basename(inspect.stack()[-1][1])[:16]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_program_name():\n program_path = os.path.abspath(sys.argv[0])\n if os.path.exists(program_path):\n return os.path.basename(program_path)\n else:\n match = re.match(r\"^.*(?:\\.egg|\\.tar|\\.tar\\.gz)(?=/)\", program_path, re.IGNORECASE)\n if (match is not None) and os.path.exists(match.group(0)):\n # python script is embedded in egg\n return os.path.basename(program_path)\n else:\n return \"unknown\"", "def bin_name() -> str:\n # NB: This will be called at import-time in several files to define static help strings\n # (e.g. \"help=f'run `{bin_name()} fmt`\").\n #\n # Ideally, we'd assert this is set unconditionally before Pants imports any of the files which\n # use it, to give us complete confidence we won't be returning \"./pants\" in our help strings.\n #\n # However, this assumption really breaks down when we go to test pants (or a plugin author goes\n # to test their plugin). Therefore we give a fallback and have integration test(s) to assert\n # we've set this at the right point in time.\n #\n # Note that __PANTS_BIN_NAME is set in options_bootstrapper.py based on the value of the\n # pants_bin_name global option, so you cannot naively modify this by setting __PANTS_BIN_NAME\n # externally. You must set that option value in one of the usual ways.\n return os.environ.get(\"__PANTS_BIN_NAME\", \"./pants\") # noqa: PANTSBIN", "def get_executable(self) -> str:\n ...", "def call_name(self):\n return str(self.executable.name)", "def get_runinfo_basename():\n return \"dumpruninfo\"", "def executable_name(self):\n return \"./simulink_househeat\"", "def programName(self):\n return self._parser.prog", "def getExeName(self):\n # define a callback to handle the retrieved lines:\n def cb(line, lines): \n lines.append(line)\n # retrieve the contents of the batch file into 'lines':\n lines = [];\n self.ftp.retrlines('retr RUN_FEMC.BAT', lambda line: cb(line, lines))\n return lines[0]", "def getBinary():\n binary = shutil.which(_ROCKETLOGGER_BINARY)\n\n if not os.path.exists(binary):\n raise FileNotFoundError(f\"Could not find RocketLogger CLI binary! 
[{binary}]\")\n return os.path.abspath(binary)", "def name(self):\n return self._path or '__main__'", "def _prog(shell_cmd):\n cmd = _which(shell_cmd)\n return os.path.basename(cmd) if cmd else None", "def find_bin_by_name(bin_name):\n bin_path = shutil.which(bin_name)\n if bin_path is None:\n return None\n else:\n major = parse_version_major(bin_path)\n return bin_path if major == required_clang_format_major else None", "def exe(self, name):\n\n return name", "def find_program(name):\r\n return name", "def executable_name(basename: str) -> str:\n if os.name == 'nt':\n return f\"{basename}.exe\"\n else:\n return basename", "def executable():\n return sys.executable", "def get_package_name():\n\tpackage = None\n\ttry:\n\t\tpackage = os.environ.get('LOCAL_PART', '') + os.environ.get('LOCAL_PART_SUFFIX', '') \n\t\tif not package and len(sys.argv) > 1:\n\t\t\tpackage = sys.argv[-1].lower()\n\texcept Exception,e:\n\t\tlog.error(str(e))\n\tfinally:\n\t\treturn package", "def getJobName(programPath=None):\n if programPath is None:\n return \"Bat_%d\" % os.getpid()\n else:\n return \"Bat_%s\" % (os.path.split(programPath)[1])", "def _executable(self) -> str:\n return sys.executable", "def _get_so_name(filename):\n # TODO verify that objdump works on other unixes and not Linux only.\n cmd = [\"objdump\", \"-p\", filename]\n pattern = r'\\s+SONAME\\s+([^\\s]+)'\n if is_solar:\n cmd = [\"elfdump\", \"-d\", filename]\n pattern = r'\\s+SONAME\\s+[^\\s]+\\s+([^\\s]+)'\n m = re.search(pattern, compat.exec_command(*cmd))\n return m.group(1)", "def _get_app_name(self):\n # TODO move app name into pyglet.app (also useful for OS X menu bar?).\n return sys.argv[0]", "def name(self):\n # This is how PIDs 0 and 4 are always represented in taskmgr\n # and process-hacker.\n if self.pid == 0:\n return \"System Idle Process\"\n if self.pid == 4:\n return \"System\"\n return os.path.basename(self.exe())", "def get_python_exe():\n\n py = str(sc.sticky[\"PythonExe\"])\n\n return py", "def FindBinary(module_space, bin_name):\n if not bin_name:\n return None\n if bin_name.startswith(\"//\"):\n # Case 1: Path is a label. Not supported yet.\n raise AssertionError(\n \"Bazel does not support execution of Python interpreters via labels yet\"\n )\n elif os.path.isabs(bin_name):\n # Case 2: Absolute path.\n return bin_name\n # Use normpath() to convert slashes to os.sep on Windows.\n elif os.sep in os.path.normpath(bin_name):\n # Case 3: Path is relative to the repo root.\n return os.path.join(module_space, bin_name)\n else:\n # Case 4: Path has to be looked up in the search path.\n return SearchPath(bin_name)", "def get_reference_binary():\n return \"./Binary/linux-x64/astcenc\"", "def SlaveBuildName(chrome_dir):\n return os.path.basename(SlaveBaseDir(chrome_dir))", "def name(self):\n module_filepath = inspect.getfile(type(self))\n module_filename = os.path.basename(module_filepath)\n command_name, _ = os.path.splitext(module_filename)\n return command_name", "def exe_filename(self):", "def _get_invocation_id():\n bazel_id_directory = os.getenv(\"KOKORO_ARTIFACTS_DIR\")\n bazel_id_file = os.path.join(bazel_id_directory, \"bazel_invocation_ids\")\n assert os.path.isfile(bazel_id_file), (\n \"bazel_invocation_ids file, written \"\n \"by RBE initialization script, expected but not found.\"\n )\n with open(bazel_id_file, \"r\") as f:\n return f.read().replace(\"\\n\", \"\")", "def get_name():\n return config.APP_NAME" ]
[ "0.72268015", "0.72012603", "0.7187184", "0.7005614", "0.69355136", "0.67844146", "0.67611265", "0.6756073", "0.66813314", "0.66522294", "0.6628342", "0.6627381", "0.6627222", "0.6603379", "0.6576827", "0.65747535", "0.6540388", "0.6478583", "0.64458454", "0.6427517", "0.64006793", "0.63754654", "0.635766", "0.63559604", "0.63553697", "0.6298096", "0.62891006", "0.62248075", "0.6222227", "0.62192595" ]
0.8264518
0
Adds a named chain to the table. The chain name is wrapped to be unique for the component creating it, so different components of Nova can safely create identically named chains without interfering with one another. At the moment, its wrapped name is <binary name>-<chain name>, so if nova-compute creates a chain named 'OUTPUT', it'll actually end up named 'nova-compute-OUTPUT'.
def add_chain(self, name, wrap=True): if wrap: self.chains.add(name) else: self.unwrapped_chains.add(name) self.dirty = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def chain_name(self) -> str:\n return pulumi.get(self, \"chain_name\")", "def addChain(self, chain):\n\n\t\tself.chain.append(chain)\n\t\tchain.parentMolecule = self", "def add_chain(self, chain, delay_sort = True):\n assert isinstance(chain, Chain)\n\n try:\n model = self.model_dict[chain.model_id]\n except KeyError:\n model = Model(model_id = chain.model_id)\n self.add_model(model, delay_sort)\n\n model.add_chain(chain, delay_sort)", "def add_chain(self, chain, delay_sort=False):\n assert isinstance(chain, Chain)\n\n if self.chain_dict.has_key(chain.chain_id):\n raise ChainOverwrite()\n\n self.chain_list.append(chain)\n self.chain_dict[chain.chain_id] = chain\n chain.model = self\n\n if not delay_sort:\n self.chain_list.sort()", "def chain_new(ctx, chain_name):\n project = ctx.obj['PROJECT']\n new_local_chain(project.project_dir, chain_name)", "def chainless_name(self, chainless_name):\n if self.local_vars_configuration.client_side_validation and chainless_name is None: # noqa: E501\n raise ValueError(\"Invalid value for `chainless_name`, must not be `None`\") # noqa: E501\n\n self._chainless_name = chainless_name", "def create_callback(self, chain):\n if chain == ['']:\n # No need to create root\n return\n cr_path = self._to_path(chain)\n assert not os.path.exists(cr_path),\\\n \"{} already exists\".format(cr_path)\n os.mkdir(cr_path)", "def set_chain_id(self, chain_id):\n assert isinstance(chain_id, str)\n for atm in self.iter_alt_loc():\n atm.chain_id = chain_id", "def chain_full_name(alignment, chain):\n return '%s_%s' % (alignment, chain)", "def get_chain_name (chain):\n if \"-\" in chain.id:\n id_chain=chain.id[-1]\n else:\n id_chain=chain.id\n return id_chain", "def add_chain_to_model(chain, model, atoms):\n\n if chain[\"type\"] == \"polymer\" or chain[\"type\"] == \"branched\":\n polymer = {\n \"internal_id\": chain[\"internal_id\"], \"sequence\": chain[\"sequence\"],\n \"helices\": [], \"strands\": [], \"residues\": {}\n }\n for i, group in enumerate(chain[\"groups\"], start=1):\n add_het_to_dict(group, chain, atoms, polymer[\"residues\"], number=i)\n add_ss_to_chain(polymer)\n model[\"polymer\"][chain[\"id\"]] = polymer\n else:\n for group in chain[\"groups\"]:\n add_het_to_dict(group, chain, atoms, model[chain[\"type\"]])", "def set_chain_id(self, chain_id):\n assert isinstance(chain_id, str)\n self.chain_id = chain_id\n\n for atm in self.iter_atoms():\n atm.set_chain_id(chain_id)", "def Chain(self, chain, **kwargs):\n\n from trulens_eval.tru_chain import TruChain\n\n return TruChain(tru=self, app=chain, **kwargs)", "def chainExists(self, chain):\n\n\t\tfor i in self.chain:\n\t\t\tif i.name == chain:\n\t\t\t\treturn True\n\n\t\treturn False", "def set_chain_id(self, chain_id):\n ## check for conflicting chain_id in the structure\n if self.model is not None:\n chk_chain = self.model.get_chain(chain_id)\n if chk_chain is not None or chk_chain != self:\n raise ChainOverwrite()\n\n Segment.set_chain_id(self, chain_id)\n\n ## resort the parent structure\n if self.model is not None:\n self.model.chain_list.sort()", "def add_chain_signature(\n self, prop: str, key: JWK, alg: Optional[AlgorithmName] = None,\n header: Optional[JsonObject] = None) -> None:\n top_level_signature = self._payload.get(prop)\n for k in top_level_signature.keys():\n if k != _CHAIN:\n del top_level_signature[k]\n chain = top_level_signature.get(_CHAIN, [])\n self._add_signature(prop, key, alg, header,\n lambda h: {_CHAIN: chain + [h]},\n lambda h: (self._payload\n .setdefault(prop, {})\n .setdefault(_CHAIN, 
[])\n .append(h)))", "def chain(self, chain):\n\n self._chain = chain", "def registerChain(cls, chainDict, chainConfig):\n assert chainDict['chainName'] not in cls.__allChainDicts, 'Chain dictionary {} already registered'.format(chainDict['chainName'])\n assert chainConfig.name not in cls.__allChainConfigs, 'Chain configuration {} already registered'.format(chainConfig.name)\n assert chainDict['chainName'] == chainConfig.name, 'Registering chain dictionary and config that have differnet names: in dictionary {}, in config {}'.format(chainDict['chainName'], chainConfig.name)\n\n cls.__allChainConfigs[chainConfig.name] = chainConfig\n cls.__allChainDicts[chainDict['chainName']] = chainDict\n log.debug(\"Registered chain %s\", chainConfig.name )", "def add_simple_chain_to_chord(self, task_type, chain_, adapter_config):\n LOG.debug(f\"simple chain with {chain_}\")\n all_chains = []\n for step in chain_:\n # Make a list of new task signatures with modified cmd and workspace\n # based off of the parameter substitutions and relative_path for\n # a given sample.\n\n new_steps = [task_type.s(step, adapter_config=adapter_config).set(queue=step.get_task_queue())]\n all_chains.append(new_steps)\n add_chains_to_chord(self, all_chains)", "def chain():\n chain_identifier, url = get_vars(request, [\"id\", \"data\"])\n info('chain=%s' % chain_identifier)\n chain = LAPPS_SERVICE_CHAINS.get_chain(chain_identifier)\n info('source-url=%s' % url)\n data = requests.get(url).text\n result = chain.run({\n \"discriminator\": \"http://vocab.lappsgrid.org/ns/media/text\", \n \"payload\": data})\n info(\"discriminator=%s\" % result.get('discriminator'))\n return render_template(\"chain.html\",\n chain=chain,\n fname=url,\n result=result,\n builder=HtmlBuilder())", "def chain(self, chain_id, model_num = 0):\n return self.struct[model_num][chain_id]", "def chainLabel(self,i):\n assert(i >= 0 and i < self.nAtoms())\n assert(self._c_structure is not NULL)\n cdef char label[2]\n label[0] = freesasa_structure_atom_chain(self._c_structure,i)\n label[1] = '\\0'\n return label", "def chain_id(self, chain_id):\n if chain_id is None:\n raise ValueError(\"Invalid value for `chain_id`, must not be `None`\") # noqa: E501\n\n self._chain_id = chain_id", "def chain_id(self, chain_id):\n if self.local_vars_configuration.client_side_validation and chain_id is None: # noqa: E501\n raise ValueError(\"Invalid value for `chain_id`, must not be `None`\") # noqa: E501\n allowed_values = [\"kcitymarket\", \"ksupermarket\", \"kmarket\", \"nokm\", \"kmyllypuro\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and chain_id not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `chain_id` ({0}), must be one of {1}\" # noqa: E501\n .format(chain_id, allowed_values)\n )\n\n self._chain_id = chain_id", "def add_solid(self, solid_name):\n return self.__add_solid(solid_name)", "def remove_chain(self, name, wrap=True):\n if wrap:\n chain_set = self.chains\n else:\n chain_set = self.unwrapped_chains\n\n if name not in chain_set:\n return\n\n self.dirty = True\n\n # non-wrapped chains and rules need to be dealt with specially,\n # so we keep a list of them to be iterated over in apply()\n if not wrap:\n self.remove_chains.add(name)\n chain_set.remove(name)\n if not wrap:\n self.remove_rules += filter(lambda r: r.chain == name, self.rules)\n self.rules = filter(lambda r: r.chain != name, self.rules)\n\n if wrap:\n jump_snippet = '-j %s-%s' % (binary_name, name)\n else:\n jump_snippet = '-j %s' % (name,)\n\n if 
not wrap:\n self.remove_rules += filter(lambda r: jump_snippet in r.rule,\n self.rules)\n self.rules = filter(lambda r: jump_snippet not in r.rule, self.rules)", "def _fix_chainID(self):\n\n from string import ascii_uppercase\n\n # get the current names\n data = self.get('chainID')\n natom = len(data)\n\n #get uniques\n chainID = []\n for c in data:\n if c not in chainID:\n chainID.append(c)\n\n if chainID == ['A','B']:\n return\n\n if len(chainID)>26:\n print(\"Warning more than 26 chains have been detected. This is so far not supported\")\n sys.exit()\n\n # declare the new names\n newID = [''] * natom\n\n # fill in the new names\n for ic,chain in enumerate(chainID):\n index = self.get('rowID',chainID=chain)\n for ind in index:\n newID[ind] = ascii_uppercase[ic]\n\n # update the new name\n self.update_column('chainID',newID)", "def set_chain_id(self, chain_id):\n assert isinstance(chain_id, str)\n self.chain_id = chain_id\n\n for frag in self.iter_fragments():\n frag.set_chain_id(chain_id)", "def add_message_chain(self, request_id, message_chain):\n logger.debug('request_id = %s' % request_id)\n with self._message_chains_lock:\n self._message_chains[request_id] = message_chain", "def wire_chains(self):\n allChains = self.instances.getAllChainInstances()\n for chain in allChains:\n logging.debug(\"%s\", chain)\n allChains[chain].setup_event_path()" ]
[ "0.65356433", "0.6292502", "0.5971866", "0.58491284", "0.5739743", "0.56037813", "0.5452546", "0.5436754", "0.5386792", "0.5381447", "0.52454704", "0.5211677", "0.51958865", "0.51822054", "0.5169344", "0.5091612", "0.50894535", "0.50690097", "0.5043837", "0.5038757", "0.50186026", "0.50089496", "0.49963078", "0.49951494", "0.49937806", "0.4945512", "0.49242452", "0.4915294", "0.4895036", "0.48928475" ]
0.7146354
0
Remove named chain. This removal "cascades". All rules in the chain are removed, as are all rules in other chains that jump to it. If the chain is not found, this is merely logged.
def remove_chain(self, name, wrap=True):
    if wrap:
        chain_set = self.chains
    else:
        chain_set = self.unwrapped_chains

    if name not in chain_set:
        return

    self.dirty = True

    # non-wrapped chains and rules need to be dealt with specially,
    # so we keep a list of them to be iterated over in apply()
    if not wrap:
        self.remove_chains.add(name)
    chain_set.remove(name)
    if not wrap:
        self.remove_rules += filter(lambda r: r.chain == name, self.rules)
    self.rules = filter(lambda r: r.chain != name, self.rules)

    if wrap:
        jump_snippet = '-j %s-%s' % (binary_name, name)
    else:
        jump_snippet = '-j %s' % (name,)

    if not wrap:
        self.remove_rules += filter(lambda r: jump_snippet in r.rule,
                                    self.rules)
    self.rules = filter(lambda r: jump_snippet not in r.rule, self.rules)
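A minimal, runnable sketch of the cascade semantics above (plain tuples stand in for IptablesRule; the rule data and helper name are illustrative, not from the class itself):

rules = [
    ('INPUT', '-j nova-filter-top'),        # jump into the wrapped chain
    ('nova-filter-top', '-j ACCEPT'),       # the chain's own rule
    ('FORWARD', '-s 10.0.0.0/8 -j DROP'),   # unrelated rule, must survive
]

def remove_chain_cascade(rules, name):
    jump_snippet = '-j %s' % name
    # drop the chain's own rules and every rule that jumps to it
    return [(chain, rule) for chain, rule in rules
            if chain != name and jump_snippet not in rule]

print(remove_chain_cascade(rules, 'nova-filter-top'))
# [('FORWARD', '-s 10.0.0.0/8 -j DROP')]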
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove(ctx, schain_name):\n skale = ctx.obj['skale']\n skale.manager.delete_schain(schain_name, wait_for=True,\n gas_price=4500000000)\n print(f'sChain {schain_name} removed!')", "def removeChain(self, mychain):\n\n\t\tichain = self.getChain(mychain)\t\n\t\tif ichain == None:\n\t\t\treturn\n\n\t\tself.chain.remove(ichain)", "def remove_chain(self, chain):\n assert isinstance(chain, Chain)\n self.model_dict[chain.model_id].remove_chain(chain)", "def remove_chain(self, chain):\n assert isinstance(chain, Chain)\n self.chain_list.remove(chain)\n del self.chain_dict[chain.chain_id]\n chain.model = None", "def remove_chain(self, chain, color, current_state):\r\n for position in self.chains[(chain, color)]:\r\n current_state[position[0]][position[1]] = 0\r\n return current_state", "def test_rewrite_existing_chain_remove_normal_dependency(self):\n self.txn.store_rewrite_chain(\"felix-a\", [\"foo\"], set([\"felix-stub\"]))\n self.assertEqual(self.txn.affected_chains, set([\"felix-a\"]))\n self.assertEqual(self.txn.chains_to_stub_out, set([]))\n self.assertEqual(self.txn.chains_to_delete, set([]))\n self.assertEqual(self.txn.referenced_chains, set([\"felix-stub\"]))\n self.assertEqual(\n self.txn.prog_chains,\n {\n \"felix-a\": [\"foo\"],\n \"felix-b\": [],\n \"felix-c\": [],\n })\n self.assertEqual(self.txn.required_chns,\n {\"felix-a\": set([\"felix-stub\"])})\n self.assertEqual(self.txn.requiring_chns,\n {\"felix-stub\": set([\"felix-a\"])})", "def remove_causal_chain_from_local_Q(self, trial_name, chain_idx):\n for state in range(self.local_Q[trial_name].shape[0]):\n self.local_Q[trial_name][state][chain_idx] = 0", "def remove_phase_from_killchain(self, killchain):\n phase = request.json['phase']\n kc = Setting.find(name=killchain)\n if not kc:\n return '', 404\n try:\n kc.remove_phase_from_killchain(phase)\n except RuntimeException as exception:\n return exception, 400\n return kc.get_killchain()", "def remove_message_chain(self, request_id):\n logger.debug('request_id = %s' % request_id)\n with self._message_chains_lock:\n del self._message_chains[request_id]", "def empty_chain(self, chain, wrap=True):\n chained_rules = [rule for rule in self.rules\n if rule.chain == chain and rule.wrap == wrap]\n if chained_rules:\n self.dirty = True\n for rule in chained_rules:\n self.rules.remove(rule)", "def remove_callback(self, chain):\n rem_path = self._to_path(chain)\n assert os.path.isdir(rem_path),\\\n \"Requested removal of non-existent dir {}\".format(rem_path)\n shutil.rmtree(rem_path)", "def test_rewrite_existing_chain_remove_stub_dependency(self):\n self.txn.store_rewrite_chain(\"felix-a\", [\"foo\"], set([\"felix-b\"]))\n self.assertEqual(self.txn.affected_chains,\n set([\"felix-a\", \"felix-stub\"]))\n self.assertEqual(self.txn.chains_to_stub_out, set([]))\n self.assertEqual(self.txn.chains_to_delete, set([\"felix-stub\"]))\n self.assertEqual(self.txn.referenced_chains, set([\"felix-b\"]))\n self.assertEqual(\n self.txn.prog_chains,\n {\n \"felix-a\": [\"foo\"],\n \"felix-b\": [],\n \"felix-c\": []\n })\n self.assertEqual(self.txn.required_chns,\n {\"felix-a\": set([\"felix-b\"])})\n self.assertEqual(self.txn.requiring_chns,\n {\"felix-b\": set([\"felix-a\"])})", "def strip_loan(chain):\n while chain[-1]['action'] == 'LOAN':\n chain.pop()\n\n return chain", "def remember_remove(self, name):\n\n for branch in self.__history:\n if History.name(branch) == name:\n self.__history.extend(History.children(branch))\n self.__history.remove(branch)\n break\n resulting_orphans, remove_occurred = 
History.remove_from_node(branch, name)\n if remove_occurred:\n self.__history.extend(resulting_orphans)\n break", "def remove_all(ctx):\n skale = ctx.obj['skale']\n cnt = 0\n for sname in get_all_schains_names(skale):\n skale.manager.delete_schain(sname)\n cnt += 1\n print(f'Success. {cnt} schains were removed')", "def remove_callback(self, chain):\n for reactor in self._reactors:\n reactor.remove_callback(chain)", "def remove(self, compname):\n refs = self.find_referring_exprs(compname)\n if refs:\n self._exprgraph.remove_nodes_from(refs)\n self._remove_disconnected_exprs()", "def test_delete_required_chain_stub(self):\n # Exit the graceful restart period, during which we do not stub out\n # chains.\n self.ipt.cleanup(async=True)\n # Install a couple of chains. foo depends on bar.\n self.ipt.rewrite_chains(\n {\"foo\": [\"--append foo --jump bar\"],\n \"bar\": [\"--append bar --jump ACCEPT\"]},\n {\"foo\": set([\"bar\"]),\n \"bar\": set()},\n async=True,\n )\n self.step_actor(self.ipt)\n # Both chains should be programmed as normal.\n self.assertEqual(self.stub.chains_contents,\n {\"foo\": [\"--append foo --jump bar\"],\n 'bar': [\"--append bar --jump ACCEPT\"] })\n\n # Deleting bar should stub it out instead.\n self.ipt.delete_chains([\"bar\"], async=True)\n self.step_actor(self.ipt)\n self.assertEqual(self.stub.chains_contents,\n {\"foo\": [\"--append foo --jump bar\"],\n 'bar': [MISSING_CHAIN_DROP % \"bar\"] })", "def remove(self, name):\n id_ = self.name_to_id(name)\n # Top nybbles of table entries are id_ + 1 (to avoid all-zero entries)\n id_in_table = (self.table >> self.maxtimebits) == id_ + 1\n hashes_removed = 0\n for hash_ in np.nonzero(np.max(id_in_table, axis=1))[0]:\n vals = self.table[hash_, :self.counts[hash_]]\n vals = [v for v, x in zip(vals, id_in_table[hash_])\n if not x]\n self.table[hash_] = np.hstack([vals,\n np.zeros(self.depth - len(vals))])\n # This will forget how many extra hashes we had dropped until now.\n self.counts[hash_] = len(vals)\n hashes_removed += np.sum(id_in_table[hash_])\n self.names[id_] = None\n self.hashesperid[id_] = 0\n self.dirty = True\n print(\"Removed\", name, \"(\", hashes_removed, \"hashes).\")", "def remove_callback(self, chain):", "def remove(self, name):\r\n goals = self.goals()\r\n for goal in goals:\r\n if goal.name == name:\r\n goals.remove(goal)\r\n return self\r\n raise GoalError('Goal %s does not exist in this phase, members are: %s' % (name, goals))", "def remove_node(self, name):\n parent_names = self.get_parents(name)\n self.source_net.remove_node(name)\n\n # Remove sole private parents\n for p in parent_names:\n if p[0] == '_' and self.source_net.degree(p) == 0:\n self.remove_node(p)", "def clear(self):\n\n\t\tfor chain in self.chain:\n\t\t\tchain.clear()\n\n\t\tself.chain = []\n\t\tself.remark = []", "def remove(name):", "def delete_callback(self, chain, value):\n for reactor in self._reactors:\n reactor.delete_callback(chain, value)", "def remove(self, name):\n cont = getattr(self, name)\n self.disconnect(name)\n self._exprmapper.remove(name)\n if has_interface(cont, IComponent):\n self._depgraph.remove(name)\n for obj in self.__dict__.values():\n if obj is not cont and is_instance(obj, Driver):\n obj.workflow.remove(name)\n obj.remove_references(name)\n\n return super(Assembly, self).remove(name)", "def delete_node(self, key_chain):\n node = self._data\n for key in key_chain[:-1]:\n node = node[key]\n\n del node[key_chain[-1]]", "def remove_curve(self, name):\n self._curve_reg.__delitem__(name)", "def 
remove_extra_path_effect(self, name: str):\n self.extra_path_effects.pop(name)", "def test_unrequired_chain_delete(self):\n self.txn.store_delete(\"felix-c\")\n self.assertEqual(self.txn.affected_chains, set([\"felix-c\"]))\n self.assertEqual(self.txn.chains_to_stub_out, set([]))\n self.assertEqual(self.txn.chains_to_delete, set([\"felix-c\"]))\n self.assertEqual(self.txn.referenced_chains,\n set([\"felix-b\", \"felix-stub\"]))\n self.assertEqual(\n self.txn.prog_chains,\n {\n \"felix-a\": [],\n \"felix-b\": [],\n })\n self.assertEqual(self.txn.required_chns,\n {\"felix-a\": set([\"felix-b\", \"felix-stub\"])})\n self.assertEqual(self.txn.requiring_chns,\n {\"felix-b\": set([\"felix-a\"]),\n \"felix-stub\": set([\"felix-a\"])})" ]
[ "0.66127115", "0.6543583", "0.65178925", "0.63441426", "0.6132853", "0.5970545", "0.59696996", "0.59487593", "0.59189224", "0.5915002", "0.5893214", "0.58134645", "0.5795842", "0.57516927", "0.5745285", "0.5725543", "0.5719747", "0.56921446", "0.55726975", "0.54871327", "0.5456069", "0.54432505", "0.54426306", "0.5396806", "0.53890693", "0.53879994", "0.5386228", "0.5346583", "0.5321529", "0.5217817" ]
0.7302495
0
Add a rule to the table. This is just like what you'd feed to iptables, just without the '-A ' bit at the start. However, if you need to jump to one of your wrapped chains, prepend its name with a '$', which will ensure the wrapping is applied correctly.
def add_rule(self, chain, rule, wrap=True, top=False):
    if wrap and chain not in self.chains:
        raise ValueError(_('Unknown chain: %r') % chain)

    if '$' in rule:
        rule = ' '.join(map(self._wrap_target_chain, rule.split(' ')))

    rule_obj = IptablesRule(chain, rule, wrap, top)
    if rule_obj not in self.rules:
        self.rules.append(IptablesRule(chain, rule, wrap, top))
        self.dirty = True
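A runnable sketch of the '$' convention described above; binary_name and the standalone helper are assumptions standing in for self._wrap_target_chain:

binary_name = 'nova-compute'  # assumed; derived from the process name in practice

def wrap_target_chain(s):
    # rewrite a '$chain' token into the wrapped '<binary_name>-chain' form
    return '%s-%s' % (binary_name, s[1:]) if s.startswith('$') else s

rule = '-s 10.0.0.0/8 -j $sg-fallback'
print(' '.join(map(wrap_target_chain, rule.split(' '))))
# -s 10.0.0.0/8 -j nova-compute-sg-fallback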
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_rule(self, rule):\n self.rule.append(rule)", "def add_rule(self, rule) -> None:\n self.add_rules([rule])", "def add_rule(self, rule):\n \n self.rules.append(rule)", "def add_rule(self, rule: Rule):\n self.rules.append(rule)", "def add_rule(self, rule: interpreter.Rule) -> None:\n\n if rule.target not in self.rules:\n self.rules[rule.target] = rule\n else:\n self.rules[rule.target] |= rule", "def add_rule(self, rule):\n assert isinstance(rule, Rule)\n self.rule.append(rule)", "def add_rule(rule):\n global RULE_DICT\n\n if rule[0] not in RULE_DICT:\n RULE_DICT[rule[0]] = []\n RULE_DICT[rule[0]].append(rule[1:])", "def rule_add(self, rulename, rule, commentline):\n\n if '->' in rule:\n zeroes = '|'.join(self.zerosymbols)\n rule = '[~$[' + zeroes + '] .o. [' + rule + ']]/[' + zeroes + ']'\n\n FST.define(rule, rulename)\n myrule = FST(rule)\n self.rules[rulename] = myrule\n self.comments[rulename] = commentline", "def insert_rule(rule, table=None):\n if not rule_exists(rule, table=table):\n cmdline = [IPTABLES_PATH]\n if table:\n cmdline += [\"-t\", table]\n cmdline += [\"-I\"] + rule\n return call(cmdline)", "def append_rule(self, rule):\n\n self._control_manager.append_rule(rule)", "def add_rule(self, rule, on=None, off=None, strength=1.):\n\n self.x[on:off, :, get_rule_index(rule, self.config)] = strength", "def add_rules(self, rules):\n self.name.append(rules)", "def add_rule(self, rule):\n\n\t\tif self._mode == Mode.PassThrough:\n\t\t\traise ValueError(\"Can't edit rules while in passthrough mode\")\n\n\t\tif self._mode == Mode.BlackList:\n\t\t\tself._log.info('Adding new rule to the blacklist rules set: %s' % rule)\n\t\t\tself._blacklist_rules.append(rule)\n\n\t\tif self._mode == Mode.WhiteList:\n\t\t\tself._log.info('Adding new rule to the whitelist rules set: %s' % rule)\n\t\t\tself._whitelist_rules.append(rule)\n\n\t\tself._dump_configuration()\n\t\tself._remove_all_flow_records()", "def addRule(self, ruleLine):\n cols = ruleLine.split(' ')\n positionNumber = int(cols[0])\n self._rules[positionNumber] = {}\n for i in range(1, len(cols)):\n self._rules[positionNumber][cols[i].upper()] = 1", "def _add_rule(self, rule):\r\n rule = re.sub(r'\\s*', '', rule)\r\n\r\n # split it on the arrow\r\n non_terminal, productions = rule.split('->')\r\n for production in productions.split('|'):\r\n self.productions.append(Production(non_terminal, list(production)))", "def __add__(self, right_rule):\n self.__subrules.append(right_rule)\n return self", "def insert(self, rule, ident=None):\n if ident is None:\n self.rules.append(rule)\n else:\n self.rules.insert(ident, rule)", "def add_rules(self, rules):\n self.model_sort.handler_block(self.row_reordered_signal)\n i = len(self.model)\n format_protocol_int = lambda s: 'ip' if not s else '\\n'.join(map(Operator.to_string, s))\n format_protocol = lambda s, n: '\\n'.join(set(n)) if n else format_protocol_int(s)\n format_int = lambda s: \"any\" if len(s) == 0 else '\\n'.join(map(Operator.to_string, s))\n format = lambda s, n: '\\n'.join(set(n)) if n else format_int(s)\n for r in rules:\n self.model_sort.get_model().append([r.identifier,\n r.name,\n format_protocol(r.protocol, r.protocol_name),\n format(r.ip_source, r.ip_source_name),\n format(r.port_source, r.port_source_name),\n format(r.ip_dest, r.ip_dest_name),\n format(r.port_dest, r.port_dest_name),\n r.action.to_string(),\n r.action.get_action_color(),\n '#FFFFFF' if i % 2 == 0 else '#DCDCDC'])\n i += 1\n self.model_sort.handler_unblock(self.row_reordered_signal)", "def insert(self, rule, 
ident):\n self[ident] = rule", "def add_rule(cls, rule: RewriteRule) -> None:\n if not isinstance(rule, RewriteRule):\n raise ValueError(f\"add_rule expected a RewriteRule not a '{type(rule)}'.\")\n cls.rules.append(rule)", "def _add_rule(cls, rule_suffix: str) -> None:\n if not cls._does_rule_exist(rule_suffix):\n cls._insert_rule(rule_suffix)", "def add_rule(self, conjunct_list, feature_table=None, rule_name=None):\n\n if rule_name is not None and rule_name in self.rules.keys():\n logger.error('A rule with the specified rule_name already exists.')\n raise AssertionError('A rule with the specified rule_name already exists.')\n\n if feature_table is None and self.feature_table is None:\n logger.error('Either feature table should be given as parameter ' +\n 'or use set_feature_table to set the feature table.')\n raise AssertionError('Either feature table should be given as ' +\n 'parameter or use set_feature_table to set ' +\n 'the feature table.')\n\n if not isinstance(conjunct_list, list):\n conjunct_list = [conjunct_list]\n\n fn, name, fn_str = self._create_rule(conjunct_list, feature_table, rule_name)\n\n self.rules[name] = fn\n self.rule_source[name] = fn_str\n self.rule_str[name] = conjunct_list\n if feature_table is not None:\n self.rule_ft[name] = feature_table\n else:\n self.rule_ft[name] = self.feature_table\n\n return name", "def add_rule(self, rule: validation.rule.Rule):\n self._rules.append(rule)\n\n return self", "def _addrule(self, nonterm, program, params, info):\n rule = Rule(nonterm, program, params, info)\n\n if not nonterm in self.rules:\n self.rules[nonterm] = []\n \n self.rules[nonterm].append(rule)", "def AddRule(self, rule_string, source):\n (add_rule, rule_dir) = ParseRuleString(rule_string, source)\n # Remove any existing rules or sub-rules that apply. 
For example, if we're\n # passed \"foo\", we should remove \"foo\", \"foo/bar\", but not \"foobar\".\n self._rules = [x for x in self._rules if not x.ParentOrMatch(rule_dir)]\n self._rules.insert(0, Rule(add_rule, rule_dir, source))", "def addStyleRuleBased(self, color, rule, minScale, maxScale, label):\r\n newLabel = QtGui.QTableWidgetItem(label)\r\n newRule = QtGui.QTableWidgetItem(rule)\r\n newMinScale = QtGui.QTableWidgetItem(minScale)\r\n newMaxScale = QtGui.QTableWidgetItem(maxScale)\r\n newColor = QtGui.QTableWidgetItem('')\r\n newColor.setBackgroundColor(color)\r\n newColor.setFlags(newColor.flags() & ~QtCore.Qt.ItemIsEditable)\r\n newLabel.setFlags(newLabel.flags() & ~QtCore.Qt.ItemIsEditable)\r\n newRule.setFlags(newRule.flags() & ~QtCore.Qt.ItemIsEditable)\r\n newMinScale.setFlags(newMinScale.flags() & ~QtCore.Qt.ItemIsEditable)\r\n newMaxScale.setFlags(newMaxScale.flags() & ~QtCore.Qt.ItemIsEditable)\r\n currentRowCount = self.tableStyleRuleBased.rowCount()\r\n self.tableStyleRuleBased.insertRow(currentRowCount)\r\n self.tableStyleRuleBased.setItem(currentRowCount, 0, newColor)\r\n self.tableStyleRuleBased.setItem(currentRowCount, 1, newLabel)\r\n self.tableStyleRuleBased.setItem(currentRowCount, 2, newRule)\r\n self.tableStyleRuleBased.setItem(currentRowCount, 3, newMinScale)\r\n self.tableStyleRuleBased.setItem(currentRowCount, 4, newMaxScale)", "def add_rule(self, selectors, properties):\n self.cliques.append((selectors, properties))", "def add_acl_rule_to_acl(self, acl_name=None, rule_id='', action=None, conditions=None):\n pass", "def add_rule_to_tree(self, root, rule):\n\n try:\n root[rule.action_location.treeposition]\n position = rule.action_location.treeposition\n rule.action_location.original_treeposition = position\n rule.action_location.treeposition = ()\n root[position].rules += [rule]\n except IndexError:\n rule.action_location.original_treeposition = rule.action_location.treeposition\n root.rules += [rule]\n return root", "def _insert_rule(cls, rule_suffix: str) -> None:\n insert_rule = cls._build_rule_string(IpTableCommandOption.INSERT, rule_suffix)\n log.info('Adding rule \"%s\"', insert_rule)\n utils.run_command(insert_rule, shell=True)" ]
[ "0.69831085", "0.6929167", "0.69029045", "0.6722005", "0.66983014", "0.6664679", "0.66394943", "0.66027844", "0.65009797", "0.647024", "0.64614254", "0.62848955", "0.6235584", "0.61647195", "0.6121893", "0.6088669", "0.6055803", "0.59040135", "0.58810866", "0.5879263", "0.5875525", "0.58629626", "0.58551747", "0.5840321", "0.580075", "0.579424", "0.5777581", "0.5775771", "0.5719358", "0.57125914" ]
0.72605485
0
Remove a rule from a chain.
def remove_rule(self, chain, rule, wrap=True, top=False):
    try:
        self.rules.remove(IptablesRule(chain, rule, wrap, top))
        if not wrap:
            self.remove_rules.append(IptablesRule(chain, rule, wrap, top))
        self.dirty = True
    except ValueError:
        pass
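The try/except above leans on value equality: an IptablesRule built from the same arguments compares equal to the stored one, so list.remove() can find it, and a missing rule surfaces as ValueError. A small sketch with a stand-in class (illustrative, not the real IptablesRule):

class Rule(object):
    def __init__(self, chain, rule):
        self.chain, self.rule = chain, rule

    def __eq__(self, other):
        return (self.chain, self.rule) == (other.chain, other.rule)

rules = [Rule('INPUT', '-j ACCEPT')]
try:
    rules.remove(Rule('INPUT', '-j ACCEPT'))  # equal by value: removed
    rules.remove(Rule('INPUT', '-j DROP'))    # absent: raises ValueError
except ValueError:
    pass
print(len(rules))  # 0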
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeRule(self, *args):\n return _libsbml.Model_removeRule(self, *args)", "def remove_chain(self, chain):\n assert isinstance(chain, Chain)\n self.model_dict[chain.model_id].remove_chain(chain)", "def remove_chain(self, chain):\n assert isinstance(chain, Chain)\n self.chain_list.remove(chain)\n del self.chain_dict[chain.chain_id]\n chain.model = None", "def remove_chain(self, name, wrap=True):\n if wrap:\n chain_set = self.chains\n else:\n chain_set = self.unwrapped_chains\n\n if name not in chain_set:\n return\n\n self.dirty = True\n\n # non-wrapped chains and rules need to be dealt with specially,\n # so we keep a list of them to be iterated over in apply()\n if not wrap:\n self.remove_chains.add(name)\n chain_set.remove(name)\n if not wrap:\n self.remove_rules += filter(lambda r: r.chain == name, self.rules)\n self.rules = filter(lambda r: r.chain != name, self.rules)\n\n if wrap:\n jump_snippet = '-j %s-%s' % (binary_name, name)\n else:\n jump_snippet = '-j %s' % (name,)\n\n if not wrap:\n self.remove_rules += filter(lambda r: jump_snippet in r.rule,\n self.rules)\n self.rules = filter(lambda r: jump_snippet not in r.rule, self.rules)", "def removeChain(self, mychain):\n\n\t\tichain = self.getChain(mychain)\t\n\t\tif ichain == None:\n\t\t\treturn\n\n\t\tself.chain.remove(ichain)", "def empty_chain(self, chain, wrap=True):\n chained_rules = [rule for rule in self.rules\n if rule.chain == chain and rule.wrap == wrap]\n if chained_rules:\n self.dirty = True\n for rule in chained_rules:\n self.rules.remove(rule)", "def delete_rule(self, index):\n del self.rules[index]", "def remove_rule(self, i):\n self.cliques.pop(i)", "def _remove_rule(cls, rule_suffix: str) -> None:\n if cls._does_rule_exist(rule_suffix):\n cls._delete_rule(rule_suffix)", "def delete_snat_rule(self, rule, ignore_missing=True):\n return self._delete(_snat.Rule, rule, ignore_missing=ignore_missing)", "def detach_rule(self, detach_rule):\n\n self._detach_rule = detach_rule", "def delete_rule(rule, table=None):\n cmdline = [IPTABLES_PATH]\n if table:\n cmdline += [\"-t\", table]\n cmdline += [\"-D\"] + rule\n return call(cmdline)", "def remove_atom(self, atom):\n assert isinstance(atom, Atom)\n assert atom.model_id == self.model_id \n self.chain_dict[atom.chain_id].remove_atom(atom)", "def remove_rule(self, rule_number):\n\n\t\tif self._mode == Mode.PassThrough:\n\t\t\traise ValueError(\"Can't edit rules while in passthrough mode\")\n\n\t\tif self._mode == Mode.BlackList:\n\t\t\tif len(self._blacklist_rules) - 1 < rule_number:\n\t\t\t\traise ValueError('Rule not found in rules list')\n\t\t\trule = self._blacklist_rules.pop(rule_number)\n\t\t\tself._log.info('Removing rule from the blacklist rules set: %s' % rule)\n\n\t\tif self._mode == Mode.WhiteList:\n\t\t\tif len(self._whitelist_rules) - 1 < rule_number:\n\t\t\t\traise ValueError('Rule not found in rules list')\n\t\t\trule = self._whitelist_rules.pop(rule_number)\n\t\t\tself._log.info('Removing rule from the whitelist rules set: %s' % rule)\n\n\t\tself._dump_configuration()\n\t\tself._remove_all_flow_records()\n\t\treturn rule", "def remove_ruleset(args, rulesengine_db):\n import os\n from src.praxxis.sqlite import sqlite_rulesengine\n from src.praxxis.rulesengine import rules\n\n if hasattr(args, \"name\"):\n name = args.name\n else:\n name = args\n\n name = rules.get_ruleset_by_ordinal(name, rulesengine_db)\n\n path = sqlite_rulesengine.get_ruleset_path(rulesengine_db, name)\n\n if os.path.isfile(path):\n os.remove(path)\n 
sqlite_rulesengine.remove_ruleset(rulesengine_db, name)\n else:\n from src.praxxis.util import error\n raise error.RulesetNotFoundError(name)\n\n return name", "def remove_policy(self, sec, ptype, rule):\n line = self.convert_to_item(ptype, rule)\n\n _id = line['id']['S']\n\n self.dynamodb.delete_item(\n Key={\n 'id': {\n 'S': _id,\n }\n },\n TableName=self.table_name,\n )\n\n return True", "def delete_resolver_rule(ResolverRuleId=None):\n pass", "def remove(self, *args):\n return _libsbml.ListOfRules_remove(self, *args)", "def remove_rule(self, ip_protocol, from_port, to_port,\r\n src_group_name, src_group_owner_id, cidr_ip):\r\n target_rule = None\r\n for rule in self.rules:\r\n if rule.ip_protocol == ip_protocol:\r\n if rule.from_port == from_port:\r\n if rule.to_port == to_port:\r\n target_rule = rule\r\n target_grant = None\r\n for grant in rule.grants:\r\n if grant.name == src_group_name:\r\n if grant.owner_id == src_group_owner_id:\r\n if grant.cidr_ip == cidr_ip:\r\n target_grant = grant\r\n if target_grant:\r\n rule.grants.remove(target_grant)\r\n if len(rule.grants) == 0:\r\n self.rules.remove(target_rule)", "def remove_ruleset(command):\n namespace = app.main(command)\n assert namespace.command == 'rr' or namespace.command == \"removeruleset\"\n assert namespace.name == \"test\"", "def removeRuleByVariable(self, *args):\n return _libsbml.Model_removeRuleByVariable(self, *args)", "def remove_phase_from_killchain(self, killchain):\n phase = request.json['phase']\n kc = Setting.find(name=killchain)\n if not kc:\n return '', 404\n try:\n kc.remove_phase_from_killchain(phase)\n except RuntimeException as exception:\n return exception, 400\n return kc.get_killchain()", "def remove_chain(self, chain, color, current_state):\r\n for position in self.chains[(chain, color)]:\r\n current_state[position[0]][position[1]] = 0\r\n return current_state", "def delete_rule(self, rule_name):\n assert rule_name in self.rules.keys(), 'Rule name not in current set of rules'\n\n del self.rules[rule_name]\n del self.rule_source[rule_name]\n del self.rule_str[rule_name]\n del self.rule_ft[rule_name]\n\n return True", "def remove_random_rule(self):\n\n\t\ta = self.get_random_cell()\n\t\ta.remove_ProductRule(a.get_random_rule())", "def remove_callback(self, chain):\n rem_path = self._to_path(chain)\n assert os.path.isdir(rem_path),\\\n \"Requested removal of non-existent dir {}\".format(rem_path)\n shutil.rmtree(rem_path)", "def _delete_rule(cls, rule_suffix: str) -> None:\n delete_rule = cls._build_rule_string(IpTableCommandOption.DELETE, rule_suffix)\n log.info('Delete rule \"%s\"', delete_rule)\n utils.run_command(delete_rule, shell=True)", "def delete_metering_label_rule(self, rule):\r\n return self.delete(self.metering_label_rule_path % (rule))", "def remove(self: TokenMatcher, label: str) -> None:\n try:\n del self._patterns[label]\n del self._callbacks[label]\n except KeyError:\n raise ValueError(\n f\"The label: {label} does not exist within the matcher rules.\"\n )", "def removepredicate(self, pred):\n self._preds.remove(pred)" ]
[ "0.7424913", "0.74110746", "0.73149914", "0.6947874", "0.6896175", "0.6762595", "0.6747168", "0.66540754", "0.65364784", "0.65273917", "0.6346213", "0.6327616", "0.6307949", "0.62569445", "0.6254516", "0.62401545", "0.62105507", "0.61818945", "0.613633", "0.61205137", "0.611331", "0.60924107", "0.6081206", "0.6020177", "0.59308904", "0.59013444", "0.5849027", "0.58295745", "0.5809589", "0.58011854" ]
0.8184237
0
Remove all rules matching regex.
def remove_rules_regex(self, regex): if isinstance(regex, six.string_types): regex = re.compile(regex) num_rules = len(self.rules) self.rules = filter(lambda r: not regex.match(str(r)), self.rules) removed = num_rules - len(self.rules) if removed > 0: self.dirty = True return removed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eliminateRules(self):\n deleteKey = []\n for key,value in self._rules.items():\n if value[0] < self._minConfidence:\n deleteKey.append(key)\n \n for key in deleteKey:\n del self._rules[key]", "def _remove_regex(regex, text) -> StyledStr:\n text = str(text)\n if NO_COLOR:\n return StyledStr(text)\n return StyledStr(re.sub(regex, \"\", text))", "def remove_tag(self, rules):\n for rule in rules:\n [s.extract() for s in self.soup.find_all(limit=1, **rule)]", "def _detach_skill_regexes(self, skill_id):\n skill_id = _entity_skill_id(skill_id)\n\n def match_skill_regexes(regexp):\n return any([r.startswith(skill_id)\n for r in regexp.groupindex.keys()])\n\n self.engine.drop_regex_entity(match_func=match_skill_regexes)", "def toClean(self, *patterns):\n self.cleanables.extend([*patterns])", "def remove_tags(self, rules):\n for rule in rules:\n [s.extract() for s in self.soup.find_all(**rule)]", "def clean_regex(regex):\n # copy for return\n ret_regex = regex\n\n # these characters are escaped (all except alternation | and escape \\)\n # see http://www.regular-expressions.info/refquick.html\n escape_chars = '[^$.?*+(){}'\n\n # remove any escape chars\n ret_regex = ret_regex.replace('\\\\', '')\n\n # escape any characters which are used by regex\n # could probably concoct something incomprehensible using re.sub() but\n # prefer to write clear code with this loop\n # note expectation that no characters have already been escaped\n for c in escape_chars:\n ret_regex = ret_regex.replace(c, '\\\\' + c)\n\n # remove any double alternations until these don't exist any more\n while True:\n old_regex = ret_regex\n ret_regex = ret_regex.replace('||', '|')\n if old_regex == ret_regex:\n break\n\n # if last char is alternation | remove it because this\n # will cause operational error\n # this can happen as user is typing in global search box\n while len(ret_regex) >= 1 and ret_regex[-1] == '|':\n ret_regex = ret_regex[:-1]\n\n # and back to the caller\n return ret_regex", "def suppress(self):\n self.pattern = hre.begins_not_silently_grouped.sub(\"(?:\", self.pattern)\n self._compiled = None\n self.structure.clear()\n return self", "def reset(self):\n self._regex = None\n self._includes.clear()", "def clear_excludepatterns(self):\n self._excludepatterns = []", "def prune(df, regex_list):\n for regex_pattern in regex_list:\n df = df[~df.case_action.str.contains(regex_pattern)]\n return df", "def __rm_general(file_contents: str) -> str:\n\n new_file_contents = file_contents\n\n for regex in COBOL_FORMAT_RM_REGEXES:\n for match in re.finditer(regex, file_contents):\n match_str = match_to_str(match)\n new_file_contents = new_file_contents.replace(match_str, '')\n\n return new_file_contents", "def remove_pattern(self, name):\n self._pattern_reg.__delitem__(name)", "def _strip_build_rules(self, obj):\n if 'build_rules' in obj:\n del obj['build_rules']\n if 'projects' in obj:\n for project in obj['projects']:\n self._strip_build_rules(project)\n if 'formats' in obj:\n for format in obj['formats']:\n self._strip_build_rules(format)\n if 'chapters' in obj:\n for chapter in obj['chapters']:\n self._strip_build_rules(chapter)", "def _remove_duplicate_rules(rule_set: _RewriteRuleSet) -> None:\n RuleKey = Tuple[str, str, str, str]\n\n def _key_and_value(rule: _RewriteRule) -> Tuple[RuleKey, _RewriteRule]:\n return (rule.from_state, rule.to_state, rule.input, rule.output), rule\n\n inverted = collections.OrderedDict(map(_key_and_value, rule_set.rule))\n duplicate_count = len(rule_set.rule) - len(inverted)\n\n if 
duplicate_count:\n logging.info(\n f\"found {duplicate_count} duplicate rewrite rules, removing them\")\n rule_set.ClearField(\"rule\")\n rule_set.rule.extend([r for r in inverted.values()])", "def clear_includepatterns(self):\n self._excludepatterns = []", "def remove(self, *args):\n return _libsbml.ListOfRules_remove(self, *args)", "def remove_matching(self, room, expr, user):\n room, user = str(room), str(user)\n as_pattern = re.compile(expr, re.I)\n\n to_remove = []\n\n with self._lock:\n regexes_for_room = self.notifications.get(room, {})\n for regex, users_for_regex in regexes_for_room.items():\n # check for exact match or pattern match\n if regex == expr or as_pattern.search(regex):\n if user in users_for_regex:\n to_remove.append(regex)\n\n # remove regexes after matching, to avoid mutating-while-iterating\n for regex in to_remove:\n self._remove(room, regex, user)\n\n if to_remove:\n self._save()\n\n return to_remove", "def remove_ruleset(command):\n namespace = app.main(command)\n assert namespace.command == 'rr' or namespace.command == \"removeruleset\"\n assert namespace.name == \"test\"", "def removeRule(self, *args):\n return _libsbml.Model_removeRule(self, *args)", "def remove_pattern(input_txt,pattern):\r\n r = re.findall(pattern,input_txt)\r\n\r\n for i in r:\r\n input_txt = re.sub(i,'',input_txt)\r\n return input_txt", "def clean(ctx):\n header(clean.__doc__)\n with ctx.cd(ROOT):\n for pattern in CLEAN_PATTERNS:\n info(\"Removing {0}\", pattern)\n ctx.run(\"rm -rf {0}\".format(pattern))", "def clean(self):\n self.df = _data.prune(self.df, [REGEX_PATTERN_GCI, REGEX_PATTERN_DB_ID])\n self.df, _ = _data.remove_totally_failed_tests(self.df)\n self.is_cleaned = True", "def clean(pattern=default, *, module=None):\n pattern = default.unwrap(pattern, current_config[\"clean\"])\n\n if pattern is False:\n return\n\n if module is None:\n import __main__ as module\n\n items = vars(module)\n to_delete = [key for key in items if fnmatch.fnmatchcase(key, pattern)]\n\n for key in to_delete:\n del items[key]", "def clean(c):", "def clear(self) -> None:\n self._fixup.clear()\n self._matcher = None", "def _remove_urls(text: str) -> str:\n pattern = r'(http:\\/\\/www\\.|https:\\/\\/www\\.|http:\\/\\/|https:\\/\\/)?[a-z0-9]+([\\-\\.]{1}[a-z0-9]+)*\\.[a-z]{2,5}(:[0-9]{1,5})?(\\/.*)?'\n\n return re.sub(pattern, '', text, flags=re.MULTILINE)", "def delete_rule(self, index):\n del self.rules[index]", "def unconstrain(self, regexp):\r\n matches = self.grep_param_names(regexp)\r\n\r\n # tranformed contraints:\r\n for match in matches:\r\n self.constrained_indices = [i[i <> match] for i in self.constrained_indices]\r\n\r\n # remove empty constraints\r\n tmp = zip(*[(i, t) for i, t in zip(self.constrained_indices, self.constraints) if len(i)])\r\n if tmp:\r\n self.constrained_indices, self.constraints = zip(*[(i, t) for i, t in zip(self.constrained_indices, self.constraints) if len(i)])\r\n self.constrained_indices, self.constraints = list(self.constrained_indices), list(self.constraints)\r\n\r\n # fixed:\r\n self.fixed_values = [np.delete(values, np.nonzero(np.sum(indices[:, None] == matches[None, :], 1))[0]) for indices, values in zip(self.fixed_indices, self.fixed_values)]\r\n self.fixed_indices = [np.delete(indices, np.nonzero(np.sum(indices[:, None] == matches[None, :], 1))[0]) for indices in self.fixed_indices]\r\n\r\n # remove empty elements\r\n tmp = [(i, v) for i, v in zip(self.fixed_indices, self.fixed_values) if len(i)]\r\n if tmp:\r\n self.fixed_indices, self.fixed_values = zip(*tmp)\r\n 
self.fixed_indices, self.fixed_values = list(self.fixed_indices), list(self.fixed_values)\r\n else:\r\n self.fixed_indices, self.fixed_values = [], []", "def delete_rule(self, value):\n\n if value >= 0:\n if sublime.ok_cancel_dialog('Are you sure you want to delete the rule: \\'%s\\'?' % self.keys[value]):\n del self.regex_rules[self.keys[value]]\n sublime.load_settings('reg_replace_rules.sublime-settings').set('replacements', self.regex_rules)\n sublime.save_settings('reg_replace_rules.sublime-settings')" ]
[ "0.64303595", "0.616448", "0.6031216", "0.6028709", "0.602167", "0.6004168", "0.59537715", "0.59090513", "0.5879021", "0.57283556", "0.57130945", "0.56602484", "0.56581986", "0.5656981", "0.56460613", "0.5560527", "0.55123913", "0.551106", "0.55080414", "0.550569", "0.5490603", "0.5486112", "0.5420345", "0.5417195", "0.5414023", "0.5407979", "0.54031986", "0.54029477", "0.5387969", "0.5384322" ]
0.8198133
0
Remove all rules from a chain.
def empty_chain(self, chain, wrap=True): chained_rules = [rule for rule in self.rules if rule.chain == chain and rule.wrap == wrap] if chained_rules: self.dirty = True for rule in chained_rules: self.rules.remove(rule)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear(self):\n\n\t\tfor chain in self.chain:\n\t\t\tchain.clear()\n\n\t\tself.chain = []\n\t\tself.remark = []", "def remove_chain(self, chain):\n assert isinstance(chain, Chain)\n self.model_dict[chain.model_id].remove_chain(chain)", "def flushRules(self):\n self.chain.flush()", "def remove_chain(self, name, wrap=True):\n if wrap:\n chain_set = self.chains\n else:\n chain_set = self.unwrapped_chains\n\n if name not in chain_set:\n return\n\n self.dirty = True\n\n # non-wrapped chains and rules need to be dealt with specially,\n # so we keep a list of them to be iterated over in apply()\n if not wrap:\n self.remove_chains.add(name)\n chain_set.remove(name)\n if not wrap:\n self.remove_rules += filter(lambda r: r.chain == name, self.rules)\n self.rules = filter(lambda r: r.chain != name, self.rules)\n\n if wrap:\n jump_snippet = '-j %s-%s' % (binary_name, name)\n else:\n jump_snippet = '-j %s' % (name,)\n\n if not wrap:\n self.remove_rules += filter(lambda r: jump_snippet in r.rule,\n self.rules)\n self.rules = filter(lambda r: jump_snippet not in r.rule, self.rules)", "def remove_chain(self, chain):\n assert isinstance(chain, Chain)\n self.chain_list.remove(chain)\n del self.chain_dict[chain.chain_id]\n chain.model = None", "def remove_callback(self, chain):\n for reactor in self._reactors:\n reactor.remove_callback(chain)", "def remove_all(ctx):\n skale = ctx.obj['skale']\n cnt = 0\n for sname in get_all_schains_names(skale):\n skale.manager.delete_schain(sname)\n cnt += 1\n print(f'Success. {cnt} schains were removed')", "def removeChain(self, mychain):\n\n\t\tichain = self.getChain(mychain)\t\n\t\tif ichain == None:\n\t\t\treturn\n\n\t\tself.chain.remove(ichain)", "def eliminateRules(self):\n deleteKey = []\n for key,value in self._rules.items():\n if value[0] < self._minConfidence:\n deleteKey.append(key)\n \n for key in deleteKey:\n del self._rules[key]", "def strip_loan(chain):\n while chain[-1]['action'] == 'LOAN':\n chain.pop()\n\n return chain", "def chain_cleanup(chain):\n snapshot = chain.take_snapshot()\n yield\n chain.revert_to_snapshot(snapshot)", "def remove_rule(self, chain, rule, wrap=True, top=False):\n try:\n self.rules.remove(IptablesRule(chain, rule, wrap, top))\n if not wrap:\n self.remove_rules.append(IptablesRule(chain, rule, wrap, top))\n self.dirty = True\n except ValueError:\n pass", "def reset(self):\n for layer in self.network:\n layer.clean()", "def _release_chain_resources(chain: Chain):\n chain.unfit()\n gc.collect()\n return chain", "def remove_chain(self, chain, color, current_state):\r\n for position in self.chains[(chain, color)]:\r\n current_state[position[0]][position[1]] = 0\r\n return current_state", "def remove_all(self):\n self._options.clear()\n self._programs.clear()", "def remove(self, *args):\n return _libsbml.ListOfRules_remove(self, *args)", "def remove_tag(self, rules):\n for rule in rules:\n [s.extract() for s in self.soup.find_all(limit=1, **rule)]", "def clean(self):\n\t\tfor v in self:\n\t\t\tv.reset_distance()\n\t\t\tv.reset_predecessor()\n\t\t\tv.reset_visited()", "def remove(self, *nonterminals):\n # type: (Iterable[Type[Nonterminal]]) -> None\n for nonterm in set(nonterminals):\n _NonterminalSet._control_nonterminal(nonterm)\n if nonterm not in self:\n raise KeyError('Nonterminal ' + nonterm.__name__ + ' is not inside')\n self._grammar.rules.remove(*self._assign_map[nonterm], _validate=False)\n del self._assign_map[nonterm]\n if self._grammar.start is nonterm:\n del self._grammar.start\n super().remove(nonterm)", "def 
disown(self):\r\n for apply_node in self.apply_nodes:\r\n del apply_node.fgraph\r\n del apply_node.deps\r\n for variable in self.variables:\r\n del variable.fgraph\r\n del variable.clients\r\n self.apply_nodes = set()\r\n self.variables = set()\r\n self.inputs = None\r\n self.outputs = None", "def remove_callback(self, chain):", "def _remove_duplicate_rules(rule_set: _RewriteRuleSet) -> None:\n RuleKey = Tuple[str, str, str, str]\n\n def _key_and_value(rule: _RewriteRule) -> Tuple[RuleKey, _RewriteRule]:\n return (rule.from_state, rule.to_state, rule.input, rule.output), rule\n\n inverted = collections.OrderedDict(map(_key_and_value, rule_set.rule))\n duplicate_count = len(rule_set.rule) - len(inverted)\n\n if duplicate_count:\n logging.info(\n f\"found {duplicate_count} duplicate rewrite rules, removing them\")\n rule_set.ClearField(\"rule\")\n rule_set.rule.extend([r for r in inverted.values()])", "def remove_all(self):\n # Post a delete all notice to the manager\n self._remove_all()", "def remove_all(self):\n # Post a delete all notice to the manager\n self._remove_all()", "def _strip_build_rules(self, obj):\n if 'build_rules' in obj:\n del obj['build_rules']\n if 'projects' in obj:\n for project in obj['projects']:\n self._strip_build_rules(project)\n if 'formats' in obj:\n for format in obj['formats']:\n self._strip_build_rules(format)\n if 'chapters' in obj:\n for chapter in obj['chapters']:\n self._strip_build_rules(chapter)", "def clear_all_triplex_loads(self):\n # Start by getting all the triplex_load objects.\n tl_list = self.get_objects_by_type(object_type='triplex_load')\n\n # If there aren't any triplex loads, warn and return.\n if tl_list is None:\n self.log.warning('clear_all_triplex_loads called, but there '\n 'are not any triplex_loads in the model!')\n return\n\n # Clear 'em out!\n for tl in tl_list:\n self.remove_properties_from_item(item_dict=tl,\n property_list=TRIPLEX_PARAMS)\n\n # All done.", "def remove_states(self, keys: list):\n if self.spec.graph:\n self.spec.graph.clear_children(keys)", "def remove_rule(self, i):\n self.cliques.pop(i)", "def _reset(lp):\n if hasattr(lp, \"solverModel\"):\n delattr(lp, \"solverModel\")\n for v in lp.variables():\n if hasattr(v, \"_xprs\"):\n delattr(v, \"_xprs\")\n for c in lp.constraints.values():\n if hasattr(c, \"_xprs\"):\n delattr(c, \"_xprs\")" ]
[ "0.6976301", "0.67297375", "0.6681245", "0.6644196", "0.6428264", "0.6263099", "0.6163615", "0.6119698", "0.60656494", "0.60592926", "0.59872264", "0.59622675", "0.59342986", "0.5900307", "0.5847478", "0.58297676", "0.58181685", "0.5774099", "0.57378364", "0.5725818", "0.5698356", "0.56609124", "0.564473", "0.5635594", "0.5635594", "0.56334233", "0.5601293", "0.5596789", "0.5592495", "0.55816597" ]
0.8091868
0
Apply the current inmemory set of iptables rules. This will blow away any rules left over from previous runs of the same component of Nova, and replace them with our current set of rules. This happens atomically, thanks to iptablesrestore.
def _apply(self): s = [(iptables_save, iptables_restore, self.ipv4)] if self.use_ipv6: s += [(ip6tables_save, ip6tables_restore, self.ipv6)] for save, restore, tables in s: all_tables, _err = save() all_lines = all_tables.split('\n') for table_name, table in six.iteritems(tables): start, end = self._find_table(all_lines, table_name) all_lines[start:end] = self._modify_rules( all_lines[start:end], table, table_name) table.dirty = False restore('\n'.join(all_lines))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iptables_apply():\n\n with settings(warn_only=True):\n run(\"sudo iptables-restore < /etc/iptables.rules\")", "def update_rules():\n update_all_rules()\n return \"OK\"", "def update_all_rules():\n try:\n for i in range(1, len(RULES_FOR_BRANCHES)):\n set_next_rule_to_redis(i, database.get_next_active_rule(i))\n logging.info(\"Rules updated\")\n except Exception as e:\n logging.error(\"Exeption occured while updating all rules. {0}\".format(e))", "def flushRules(self):\n self.chain.flush()", "def process_floating_ip_nat_rules(self):\n # Clear out all iptables rules for floating ips\n self.iptables_manager.ipv4['nat'].clear_rules_by_tag('floating_ip')\n\n floating_ips = self.get_floating_ips()\n # Loop once to ensure that floating ips are configured.\n for fip in floating_ips:\n # Rebuild iptables rules for the floating ip.\n fixed = fip['fixed_ip_address']\n fip_ip = fip['floating_ip_address']\n for chain, rule in self.floating_forward_rules(fip_ip, fixed):\n self.iptables_manager.ipv4['nat'].add_rule(chain, rule,\n tag='floating_ip')\n\n self.iptables_manager.apply()", "def flush_iptables(host, chain='OUTPUT'):\n host_resource = rhevm_helpers.get_host_resource(host, config.HOSTS_PW)\n return host_resource.firewall.chain(chain).clean_rules()", "def set_rules(rules, overwrite=True, use_conf=False): # pragma: no cover\n init(use_conf=False)\n _ENFORCER.set_rules(rules, overwrite, use_conf)", "def set_rules(rules, overwrite=True, use_conf=False):\n\n init(use_conf=False)\n _ENFORCER.set_rules(rules, overwrite, use_conf)", "def apply_ruleset(self, ruleset):\n updates = [self._get_lexicon_update(ruleset['lexicon'])]\n updates += ruleset['rules']\n self.apply_updates(updates)", "def proxy_iptables():\n\n # get proxy list from proxylb\n local('scp alpha@proxylb:proxyrotate/proxies.list .')\n if os.path.isfile('proxies.list'):\n for line in open('proxies.list'):\n ip = line.strip().split(',')[0].strip()\n env.host_string = ip\n env.user = 'alpha'\n print 'Restoring iptables rules on',ip,'...'\n run('sudo iptables-restore < /etc/iptables.rules')", "def _set_rules_mgmt(self, gnp_config, network, host):\n addr_pool = self.dbapi.address_pool_get(network.pool_uuid)\n ip_version = IPAddress(f\"{addr_pool.network}\").version\n self._add_source_net_filter(gnp_config[\"spec\"][\"ingress\"],\n f\"{addr_pool.network}/{addr_pool.prefix}\")\n if (ip_version == 6):\n self._add_source_net_filter(gnp_config[\"spec\"][\"ingress\"], LINK_LOCAL)\n if (ip_version == 4):\n # add rule to allow DHCP requests (dhcp-offer have src addr == 0.0.0.0)\n # worker/storage nodes request IP dynamically\n rule = self._get_dhcp_rule(host.personality, \"UDP\", ip_version)\n gnp_config[\"spec\"][\"ingress\"].append(rule)\n\n # copy the TCP rule and do the same for IGMP\n igmp_proto = 2\n igmp_egr_rule = copy.deepcopy(gnp_config[\"spec\"][\"egress\"][0])\n igmp_egr_rule[\"protocol\"] = igmp_proto\n igmp_egr_rule[\"metadata\"][\"annotations\"][\"name\"] = \\\n f\"stx-egr-{host.personality}-{network.type}-igmp{ip_version}\"\n gnp_config[\"spec\"][\"egress\"].append(igmp_egr_rule)\n igmp_ingr_rule = copy.deepcopy(gnp_config[\"spec\"][\"ingress\"][0])\n igmp_ingr_rule[\"protocol\"] = igmp_proto\n igmp_ingr_rule[\"metadata\"][\"annotations\"][\"name\"] = \\\n f\"stx-ingr-{host.personality}-{network.type}-igmp{ip_version}\"\n gnp_config[\"spec\"][\"ingress\"].append(igmp_ingr_rule)", "def edit_dedicated_fwl_rules(self, firewall_id, rules):\r\n mask = ('mask[networkVlan[firewallInterfaces'\r\n 
'[firewallContextAccessControlLists]]]')\r\n svc = self.client['Network_Vlan_Firewall']\r\n fwl = svc.getObject(id=firewall_id, mask=mask)\r\n network_vlan = fwl['networkVlan']\r\n\r\n for fwl1 in network_vlan['firewallInterfaces']:\r\n if fwl1['name'] == 'inside':\r\n continue\r\n for control_list in fwl1['firewallContextAccessControlLists']:\r\n if control_list['direction'] == 'out':\r\n continue\r\n fwl_ctx_acl_id = control_list['id']\r\n\r\n template = {\r\n 'firewallContextAccessControlListId': fwl_ctx_acl_id,\r\n 'rules': rules\r\n }\r\n\r\n svc = self.client['Network_Firewall_Update_Request']\r\n return svc.createObject(template)", "def rule(self, rules):\n\n if not isinstance(rules, list):\n rules = [rules]\n\n for rule in rules:\n self.__addRule(rule)", "def set_enodebd_iptables_rule():\n # Remove & Set iptable rules for exposing public ip\n # for enobeb instead of private\n cfg = load_service_config('enodebd')\n port, interface = cfg['tr069']['port'], cfg['tr069']['interface']\n enodebd_public_ip = cfg['tr069']['public_ip']\n # IPv4 only as iptables only works for IPv4. TODO: Investigate ip6tables?\n enodebd_ip = get_ip_from_if(interface, preference=IpPreference.IPV4_ONLY)\n # Incoming data from 192.88.99.142 -> enodebd address (eg 192.168.60.142)\n yield from run(get_iptables_rule(\n port, enodebd_public_ip, enodebd_ip, add=False))\n yield from run(get_iptables_rule(\n port, enodebd_public_ip, enodebd_ip, add=True))", "def add_rules(self, rules):\n self.model_sort.handler_block(self.row_reordered_signal)\n i = len(self.model)\n format_protocol_int = lambda s: 'ip' if not s else '\\n'.join(map(Operator.to_string, s))\n format_protocol = lambda s, n: '\\n'.join(set(n)) if n else format_protocol_int(s)\n format_int = lambda s: \"any\" if len(s) == 0 else '\\n'.join(map(Operator.to_string, s))\n format = lambda s, n: '\\n'.join(set(n)) if n else format_int(s)\n for r in rules:\n self.model_sort.get_model().append([r.identifier,\n r.name,\n format_protocol(r.protocol, r.protocol_name),\n format(r.ip_source, r.ip_source_name),\n format(r.port_source, r.port_source_name),\n format(r.ip_dest, r.ip_dest_name),\n format(r.port_dest, r.port_dest_name),\n r.action.to_string(),\n r.action.get_action_color(),\n '#FFFFFF' if i % 2 == 0 else '#DCDCDC'])\n i += 1\n self.model_sort.handler_unblock(self.row_reordered_signal)", "def unset_ip_routing(self):\n os_type = os.getenv('server_os_type', None)\n if self.remote is not True and os_type not in ['Linux']:\n return\n self.log_output('Unsetting IP forwarding and iptables rules on {} host'.format(\n os_type))\n\n command = (\n \"echo '{0}' | sudo -S iptables -F && \"\n \"echo '{0}' | sudo -S iptables -X && \"\n \"echo '{0}' | sudo -S iptables -t nat -F && \"\n \"echo '{0}' | sudo -S iptables -t nat -X && \"\n \"echo '{0}' | sudo -S iptables -t mangle -F && \"\n \"echo '{0}' | sudo -S iptables -t mangle -X && \"\n \"echo '{0}' | sudo -S iptables -P INPUT ACCEPT && \"\n \"echo '{0}' | sudo -S iptables -P FORWARD ACCEPT && \"\n \"echo '{0}' | sudo -S iptables -P OUTPUT ACCEPT && \"\n \"echo '{0}' | sudo -S sysctl -w net.ipv4.ip_forward=0 && \"\n \"echo '{0}' | sudo -S sysctl -w net.ipv6.conf.all.forwarding=0 && \"\n \"echo '{0}' | sudo -S sysctl -w net.ipv4.conf.all.send_redirects=1\"\n )\n self.run_command(command.format(self.ssh_password))", "def update_acc_by_rules(self) -> None:\n for rule, coeff in self.rules.items():\n acc_delta = rule(self) # can't call self.rule\n self.update_acc(acc_delta, coeff)", "def reorder_rules(self):\n new_order = 
sorted(self.rules, key=attrgetter(\"pci_order\"))\n for idx, r in enumerate(new_order):\n r.dev_rename(\"%s%s\" % (r.dev_name_prefix, idx))", "def edit_standard_fwl_rules(self, firewall_id, rules):\r\n rule_svc = self.client['Network_Firewall_Update_Request']\r\n template = {\r\n \"networkComponentFirewallId\": firewall_id,\r\n \"rules\": rules}\r\n\r\n return rule_svc.createObject(template)", "def apply_rules(term: Term, rules):\n return functools.reduce(apply_rule, rules, term)", "def set_device_rules(self, rules, rule_objs):\n self.logger.debug(\"set_device_rules: rules: {}\".format(rules))\n self._load_device_rules(rules, rule_objs=rule_objs)\n self._determine_cli_command_list()\n self._determine_get_method_list()", "def add_rules(self, rules: List[Rule]):\n self.rules.extend(rules)", "def apply_rule_group(client, firewall_rule, aws_configs):\n\n stateless_rules = []\n\n name = aws_configs[\"rule_group\"]\n priority = aws_configs[\"priority_start\"]\n capacity = 1\n\n sources = [\n {\"AddressDefinition\": cidr} for cidr in firewall_rule.cidrs\n ]\n\n sources_capacity = len(sources) if len(sources) > 0 else 1\n protocols_capacity = len(firewall_rule.protocol_ports) if len(firewall_rule.protocol_ports) > 0 else 1\n\n # I don't understand this, but it seems to work\n capacity *= sources_capacity * protocols_capacity\n\n for protocol, ports in firewall_rule.protocol_ports.items():\n ports_capacity = len(ports) if len(ports) > 0 else 1\n capacity *= ports_capacity\n port_ranges = []\n for port_range in ports:\n port_split = port_range.split(\"-\")\n port_ranges.append(\n {\n \"FromPort\": int(port_split[0]),\n \"ToPort\": int(port_split[-1])\n }\n )\n\n rule = {\n \"Priority\": priority,\n \"RuleDefinition\": {\n \"Actions\": [actions_map[firewall_rule.action]],\n \"MatchAttributes\": {\n \"Sources\": sources,\n \"DestinationPorts\": port_ranges,\n \"Protocols\": [protocol_map[protocol]]\n }\n }\n }\n stateless_rules.append(rule)\n priority += aws_configs[\"priority_jump\"]\n\n if \"add_to_capacity\" in aws_configs:\n capacity += aws_configs[\"add_to_capacity\"]\n\n # Check if rule group exists and updates it\n try:\n get_response = get_rule_group(client, name)\n print(f\"AWS Firewall rule group {name} exists. Updating...\")\n update_token = get_response[\"UpdateToken\"]\n response = client.update_rule_group(\n UpdateToken=update_token,\n RuleGroupName=name,\n Type=\"STATELESS\",\n RuleGroup={\n \"RulesSource\": {\n \"StatelessRulesAndCustomActions\": {\n \"StatelessRules\": stateless_rules\n }\n }\n }\n )\n return response\n except client.exceptions.ResourceNotFoundException:\n print(f\"Creating AWS Firewall rule group {name}...\")\n\n response = client.create_rule_group(\n Capacity=capacity,\n Type=\"STATELESS\",\n RuleGroupName=name,\n RuleGroup={\n \"RulesSource\": {\n \"StatelessRulesAndCustomActions\": {\n \"StatelessRules\": stateless_rules\n }\n }\n }\n )\n return response", "def unblockAll():\n result = subprocess.Popen(\"/sbin/iptables -F INPUT 2>&1\", shell=True, stdout=subprocess.PIPE).stdout.read()\n if result.strip() != \"\":\n logger.error(\"Could not flush INPUT chain. Error: %s.\" % (result))\n result = subprocess.Popen(\"/usr/sbin/ipset destroy 2>&1\", shell=True, stdout=subprocess.PIPE).stdout.read()\n if result.strip() != \"\":\n logger.error(\"Could not destroy all ipsets. 
Error: %s.\" % (result))\n sys.exit(255)", "def _set_rules_storage(self, gnp_config, network, host):\n\n addr_pool = self.dbapi.address_pool_get(network.pool_uuid)\n ip_version = IPAddress(f\"{addr_pool.network}\").version\n self._add_source_net_filter(gnp_config[\"spec\"][\"ingress\"],\n f\"{addr_pool.network}/{addr_pool.prefix}\")\n if (ip_version == 6):\n self._add_source_net_filter(gnp_config[\"spec\"][\"ingress\"], LINK_LOCAL)\n if (ip_version == 4):\n # add rule to allow DHCP requests (dhcp-offer have src addr == 0.0.0.0)\n rule = self._get_dhcp_rule(host.personality, \"UDP\", ip_version)\n gnp_config[\"spec\"][\"ingress\"].append(rule)", "def reorder_udev_rules(self):\n self.udev.reorder_rules()", "def _set_rules_admin(self, gnp_config, network, host):\n addr_pool = self.dbapi.address_pool_get(network.pool_uuid)\n ip_version = IPAddress(f\"{addr_pool.network}\").version\n self._add_source_net_filter(gnp_config[\"spec\"][\"ingress\"],\n f\"{addr_pool.network}/{addr_pool.prefix}\")\n if (ip_version == 6):\n self._add_source_net_filter(gnp_config[\"spec\"][\"ingress\"], LINK_LOCAL)\n if (ip_version == 4):\n # copy the TCP rule and do the same for IGMP\n igmp_proto = 2\n igmp_egr_rule = copy.deepcopy(gnp_config[\"spec\"][\"egress\"][0])\n igmp_egr_rule[\"protocol\"] = igmp_proto\n igmp_egr_rule[\"metadata\"][\"annotations\"][\"name\"] = \\\n f\"stx-egr-{host.personality}-{network.type}-igmp{ip_version}\"\n gnp_config[\"spec\"][\"egress\"].append(igmp_egr_rule)\n igmp_ingr_rule = copy.deepcopy(gnp_config[\"spec\"][\"ingress\"][0])\n igmp_ingr_rule[\"protocol\"] = igmp_proto\n igmp_ingr_rule[\"metadata\"][\"annotations\"][\"name\"] = \\\n f\"stx-ingr-{host.personality}-{network.type}-igmp{ip_version}\"\n gnp_config[\"spec\"][\"ingress\"].append(igmp_ingr_rule)", "def _refresh(self, iface):\n\n self._ensure_drop(iface)\n\n new = set(self._resolve(self.whitelist))\n old = set(self._resolve(\n rule[-2] for rule in self._list()\n if rule[-1] == iface and rule[1] == 'ACCEPT'))\n\n for ip in new - old:\n self.insert(1, ip, iface, 'ACCEPT')\n\n for ip in old - new:\n self.delete(ip, iface, 'ACCEPT')", "def add_nat_rules(self) -> None:\n log.info(\"Adding nat rules for interfaces %s\", self._input_interfaces)\n\n for output_interface in self._get_default_interfaces():\n self._add_rule(self._build_nat_string(output_interface))\n for input_interface in self._input_interfaces:\n self._add_rule(self._build_mark_string(input_interface))", "def updateNetworkSwitchAccessControlLists(self, networkId: str, rules: list):\n\n kwargs = locals()\n\n metadata = {\n 'tags': ['switch', 'configure', 'accessControlLists'],\n 'operation': 'updateNetworkSwitchAccessControlLists',\n }\n resource = f'/networks/{networkId}/switch/accessControlLists'\n\n body_params = ['rules']\n payload = {k: v for (k, v) in kwargs.items() if k in body_params}\n\n return self._session.put(metadata, resource, payload)" ]
[ "0.74310803", "0.6443475", "0.6198638", "0.61369956", "0.58547723", "0.5850812", "0.575473", "0.57367265", "0.5676085", "0.5482839", "0.5477452", "0.5454835", "0.5432685", "0.5365001", "0.53534245", "0.53432953", "0.5276151", "0.5262996", "0.5246209", "0.52433085", "0.5233582", "0.5231907", "0.5147911", "0.5135812", "0.5132816", "0.51302904", "0.51116717", "0.5099458", "0.50958717", "0.5091382" ]
0.7693331
0
Instantiates a finite grid. The limits are specified as a list of tuples of (low, high) values, one for each grid vector.
def __init__(self, origin, grid_vectors, limits): assert len(grid_vectors) == len(limits) Grid.__init__(self, origin, grid_vectors) self._Limits = limits
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def grid(gmin, gmax, gstep):\n n_vals = int((gmax - gmin)/gstep + 1)\n my_grid = linspace(gmin, gmax, n_vals)\n return my_grid", "def create_grid(xlim, ylim, step):\n x_range = np.arange(xlim[0], xlim[1], step)\n y_range = np.arange(ylim[0], ylim[1], step)\n return x_range, y_range", "def make_grid(data=None, xmin=-5, xmax=5, ymin=-5, ymax=5, n_points = 400):\n if data is not None:\n xmin, ymin = np.min(data, axis = 0)\n xmax, ymax = np.max(data, axis = 0)\n\n plt.ylim(ymin, ymax)\n plt.xlim(xmin, xmax)\n\n x, y = np.meshgrid(np.linspace(xmin, xmax, n_points), np.linspace(ymin, ymax, n_points))\n grid = np.c_[x.ravel(), y.ravel()] # grid has n_points ^2 row and 2 columns\n return x, y, grid", "def define_grid():\n grid_left = np.array([[-13.1000000000000, -35.5000000000000, -48.3000000000000, -60, -16.9000000000000,\n -34.8000000000000, -67.5000000000000, -46.1000000000000, -59.8000000000000,\n -14.2000000000000, -28.3000000000000, -42.3000000000000, -67.6000000000000,\n -50.5000000000000, -14.6000000000000, -60.9000000000000, -31.6000000000000,\n -5.10000000000000, -65.6000000000000, -41.8000000000000, -55.1000000000000,\n -22.7000000000000, -5.80000000000000, -49.2000000000000, -34.5000000000000,\n -61.5500000000000, -63.6000000000000, -40.4000000000000, -48.7000000000000,\n -21.8000000000000, -58.2000000000000, -7, -36.3000000000000, -48.1000000000000,\n -56.8000000000000, -7.30000000000000, -22.2000000000000, -36.8000000000000,\n -46.8000000000000],\n [-67.7000000000000, -60, -55.1000000000000, -51.8000000000000, -51.6000000000000,\n -49.3000000000000, -47.1000000000000, -43.7000000000000, -39.6000000000000,\n -39.1000000000000, -31.2000000000000, -30.7000000000000, -30.1000000000000,\n -24.4000000000000, -22.7000000000000, -18.7000000000000, -16.9000000000000,\n -12.6000000000000, -10.8000000000000, -10.2000000000000, -4.01000000000000, 1.20000000000000,\n 2.80000000000000, 3.70000000000000, 3.90000000000000, 6.20000000000000, 8.30000000000000,\n 11.8000000000000, 14.5000000000000, 16, 18.2000000000000, 18.4000000000000, 19.9000000000000,\n 24.6000000000000, 28.5200000000000, 33.8000000000000, 35, 35.4000000000000,\n 35.6000000000000],\n [69.1000000000000, 66, 58.2000000000000, 48, 78, 71.7000000000000, 31, 61.1000000000000,\n 53.3000000000000, 81.1000000000000, 76, 70.2000000000000, 41.2000000000000, 64.4000000000000,\n 80.2000000000000, 50.9000000000000, 75.2000000000000, 77.3000000000000, 37.8000000000000, 67,\n 53.2000000000000, 72, 74.8000000000000, 54.7000000000000, 66.5000000000000, 35.9000000000000,\n 25.7000000000000, 60.7000000000000, 50.5000000000000, 68.9000000000000, 27.3000000000000,\n 70.3000000000000, 59.6000000000000, 44, 20.8000000000000, 61.7000000000000, 57.2000000000000,\n 47, 36]])\n stn_left = np.array([[-14.6, -13.2, -11.7, -9.10, -11.7, -13.2, -7.90, -10],\n [-15.1, -15.1, -15.1, -12.6, -12.6, -12.6, -9.40, -10.1],\n [-5.40, -7.20, -8.70, -8.70, -7.50, -5.10, -10.3, -7.80]])\n grid_right = np.copy(grid_left)\n grid_right[0, :] = grid_right[0, :] * -1\n stn_right = np.copy(stn_left)\n stn_right[0, :] = stn_right[0, :] * -1\n\n return grid_left, grid_right, stn_left, stn_right", "def __new__(cls, minx, miny, minz, maxx, maxy, maxz):\n # Coerce bounds to floats, and nones to infs\n kwargs = locals()\n for b, inf in zip(('min', 'max'),\n (-np.inf, np.inf)):\n for axis in 'xyz':\n bound = b + axis\n value = kwargs[bound]\n kwargs[bound] = inf if value is None else float(value)\n \n kwargs.pop('cls') # must be passed positionally\n return super(cls, 
cls).__new__(cls, **kwargs)", "def create_uniform_grid(low, high, bins=(10, 10)):\n grid = [np.linspace(low[dim], high[dim], bins[dim] + 1)[1:-1]\n for dim in range(len(bins))]\n\n return grid", "def __init__(self, limits, resolution):\n self.limits = limits\n self.resolution = resolution\n self.X, self.Y = self._create_meshgrid()\n self.coords, self.tree = self._generate_coords()\n self.fitness_function = self._calculate_fitness().reshape(self.resolution, self.resolution)\n self.max, self.min = np.max(self.fitness_function), np.min(self.fitness_function)", "def _create_meshgrid(self):\n x = np.linspace(self.limits[0], self.limits[1], self.resolution)\n y = np.linspace(self.limits[2], self.limits[3], self.resolution)\n X, Y = np.meshgrid(x, y)\n return X, Y", "def grid(min_val, max_val, step=1, *, num_dimensions=2):\n axis = itertools.takewhile(lambda x: x <= max_val,\n itertools.count(min_val, step))\n axes = itertools.tee(axis, num_dimensions)\n return itertools.product(*axes)", "def create_grid(grid):\r\n for i in range(4):\r\n grid.append([0,0,0,0])", "def make_meshgrid(x_min,x_max,y_min,y_max, h=.02):\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy", "def make_meshgrid(x_min,x_max,y_min,y_max, h=.02):\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy", "def define_grid(self):\n self.h_shape = int(\n np.round((self.h_stop - self.h_start) / self.h_step, 2)) + 1\n self.k_shape = int(\n np.round((self.k_stop - self.k_start) / self.k_step, 2)) + 1\n self.l_shape = int(\n np.round((self.l_stop - self.l_start) / self.l_step, 2)) + 1\n self.grid_origin = [self.h_start, self.k_start, self.l_start]\n self.grid_step = [int(np.rint(1.0/self.h_step)),\n int(np.rint(1.0/self.k_step)),\n int(np.rint(1.0/self.l_step))]\n self.grid_shape = [self.h_shape, self.k_shape, self.l_shape]\n self.grid_basis = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]", "def create_grid(grid):\r\n for i in range(4):\r\n grid.append([0]*4)\r\n return grid", "def _createGrid(self, dimensions, density):\n import math\n\n xmin, xmax = dimensions[0], dimensions[1]\n imin, imax = dimensions[2], dimensions[3]\n\n hsteps = math.ceil((xmax - xmin)/density)\n vsteps = math.ceil((imax - imin)/density)\n\n hgrids = int(math.ceil(hsteps/self.gridsize))\n vgrids = int(math.ceil(vsteps/self.gridsize))\n\n grid_inc = density * self.gridsize\n \n #Add one inside the range() because you want to include the last one\n horizontal = [[xmin + (x * grid_inc), xmin + ((x+1) * grid_inc)] for x in range(hgrids)]\n vertical = [[imin + (im * grid_inc), imin + ((im+1) * grid_inc)] for im in range(vgrids)]\n\n #This makes the negative to positive less confusing, positive is at index = 0\n vertical.reverse()\n\n grid_map = []\n\n for im in vertical:\n temp = []\n for x in horizontal:\n my_x = list(x)\n my_x.extend(im)\n temp.append(my_x)\n grid_map.append(temp)\n\n return grid_map", "def makeGrid(self):\n self.h = self.step_x\n self.k = self.step_t\n self.t, self.x = np.meshgrid(np.arange(self.min_t, self.max_t, self.step_t), np.arange(self.min_x, self.max_x\n , self.step_x))", "def make_NM08_grid(work_dir, log_base, max_range):\n base_name = 'NM08'\n dat = fdata.fdata(work_dir=work_dir)\n dat.files.root = base_name\n pad_1 = [1500., 1500.]\n # Symmetric grid in x-y\n base = log_base\n dx = pad_1[0]\n x1 = dx ** (1 - base) * np.linspace(0, dx, max_range) ** base\n X = np.sort(list(pad_1[0] - x1) + list(pad_1[0] + x1)[1:] + [pad_1[0]])\n # If no. 
z nodes > 100, temperature_gradient will not like it...\n surface_deps = np.linspace(350, -750, 4)\n cap_grid = np.linspace(-750, -1200, 4)\n perm_zone = np.linspace(-1200., -2100., 30)\n lower_reservoir = np.linspace(-2100, -3100, 10)\n Z = np.sort(list(surface_deps) + list(cap_grid) + list(perm_zone)\n + list(lower_reservoir))\n dat.grid.make('{}_GRID.inp'.format(base_name), x=X, y=X, z=Z,\n full_connectivity=True)\n grid_dims = [3000., 3000.] # 5x7x5 km grid\n # Geology time\n dat.new_zone(1, 'suface_units', rect=[[-0.1, -0.1, 350 + 0.1],\n [grid_dims[0] + 0.1,\n grid_dims[1] + 0.1,\n -750 - 0.1]],\n permeability=[1.e-15, 1.e-15, 1.e-15], porosity=0.1,\n density=2477, specific_heat=800., conductivity=2.2)\n dat.new_zone(2, 'clay_cap', rect=[[-0.1, -0.1, -750],\n [grid_dims[0] + 0.1,\n grid_dims[1] + 0.1,\n -1200 - 0.1]],\n permeability=1.e-18, porosity=0.01, density=2500,\n specific_heat=1200., conductivity=2.2)\n return dat", "def __init__(self, grid, x, y, cols):\n self.grid = grid\n self.x = x\n self.y = y\n self.cols = cols", "def create_grid(grid):\r\n inner = [0]*4\r\n for i in range(4):\r\n grid.append(inner[:])", "def make_grid(N):\n\n x = np.linspace(-2. , 2 , N)\n y = np.linspace(-2. , 2 , N)\n # two evenly spaced grids from -2 to 2\n\n return x, y", "def create_grid(grid):\r\n for i in range (4):\r\n grid.append ([])\r\n for j in range (4):\r\n grid[i].append (0)", "def initialise_grid(self, y, x, starting_value):\n # Create a grid of the specified size\n self.grid = np.zeros( (y, x), np.int8, 'C')\n \n # Record the sizes in the class variables\n self.x_len = x\n self.y_len = y\n \n # Set the initial values of the array\n self.grid += starting_value", "def _build_grid(self):\n n = self.params['n']\n\n x_min, x_max = min(self.node[:, 0]), max(self.node[:, 0])\n y_min, y_max = min(self.node[:, 1]), max(self.node[:, 1])\n xv = np.linspace(x_min, x_max, num=n, endpoint=True)\n yv = np.linspace(y_min, y_max, num=n, endpoint=True)\n xg, yg = np.meshgrid(xv, yv, sparse=False, indexing='xy')\n\n return xg, yg", "def linear_grid(D, n = 100, min_max = (-100, 100)):\r\n\r\n g = np.linspace(min_max[0], min_max[1], n)\r\n G = np.ones((n, D))\r\n\r\n return G*g[:,None]", "def create_grid(size_x, size_y, default=None):\n return [[default for _x in range(size_y)] for _y in range(size_x)]", "def make_grid(lat_values, lon_values, np_lat, np_lon):\n \n coordsys = iris.coord_systems.RotatedGeogCS(np_lat, np_lon)\n \n latitude = iris.coords.DimCoord(lat_values,\n standard_name='latitude',\n units='degrees_north',\n coord_system=coordsys)\n longitude = iris.coords.DimCoord(lon_values, \n standard_name='longitude',\n units='degrees_east',\n coord_system=coordsys)\n\n dummy_data = numpy.zeros((len(lat_values), len(lon_values)))\n new_cube = iris.cube.Cube(dummy_data, dim_coords_and_dims=[(latitude, 0), (longitude, 1)])\n \n return new_cube", "def linear_grid(D, n = 100, min_max = (-100, 100)):\n\n g = np.linspace(min_max[0], min_max[1], n)\n G = np.ones((n, D))\n\n return G*g[:,None]", "def make_lattice(min,max,lattice_vectors):\n xs = np.roll(np.arange(min[0],max[0]),max[0])\n ys = np.roll(np.arange(min[1],max[1]),max[1])\n lattice = np.dstack(np.meshgrid(xs,ys)).reshape(-1,2)\n lattice = np.matmul(lattice,lattice_vectors)\n return lattice", "def initialize_grid(self):\n self.grid = np.zeros([self.N, self.N, self.N])\n return self.grid", "def make_grid(self, nx, ny):\n nx_vec = np.arange(nx)\n ny_vec = np.arange(ny)\n yv, xv = np.meshgrid(ny_vec, nx_vec)\n grid = np.stack((yv, xv), axis=2)\n grid = 
grid.reshape(1, 1, ny, nx, 2)\n return grid" ]
[ "0.6845707", "0.68128675", "0.6641913", "0.6626282", "0.64288163", "0.63951415", "0.63662404", "0.6347766", "0.63329864", "0.63275075", "0.63241917", "0.63241917", "0.6310568", "0.6308185", "0.6199303", "0.6160781", "0.61287147", "0.60951614", "0.60779905", "0.6064125", "0.6000892", "0.59769756", "0.5959832", "0.5955469", "0.59352773", "0.59234166", "0.59065956", "0.5897886", "0.5889616", "0.5882854" ]
0.6917152
0
Returns the number of grid intervals in each direction.
def grid_point_count(self): return pytools.product(self.grid_point_counts())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_num_tiles(grid_bbox, dxy): \r\n xmin, xmax, ymin, ymax = grid_bbox\r\n return (int(np.abs(ymax-ymin)/dxy), int(np.abs(xmax-xmin)/dxy))", "def getNumGrids(self):\n c = list(self.gridVars.keys())\n return len(list(self.gridVars[c[0]].values()))", "def getNumTiles(self):\n return len(list(product(list(range(self.width+1))[1:], list(range(self.height+1))[1:])))", "def _number_of_intervals(self):\n return self._number_of_levels - 1", "def numIslands(grid):\n # count to store each new island found\n count = 0\n # If the grid is empty, return 0\n if not grid:\n return count\n\n y_max = len(grid)\n x_max = len(grid[0])\n \n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == '1':\n dfs(grid, i, j)\n count += 1\n return count", "def num_tiles(self):\n return self.num_row_tiles * self.num_col_tiles", "def num_cells(self):\n if hasattr(self, '__num_cells__'):\n return self.__num_cells__\n if self.x is not None:\n return self.x.size(self.__cat_dim__('x', self.x))\n if self.boundary_index is not None:\n return int(self.boundary_index[1,:].max()) + 1\n assert self.upper_index is None and self.lower_index is None\n return None", "def island_perimeter(grid):\n count = 0\n for j, r in enumerate(grid):\n for i, c in enumerate(r):\n if c == 1:\n if j == 0 or grid[j - 1][i] == 0:\n count += 1\n if i == 0 or grid[j][i - 1] == 0:\n count += 1\n if j == len(grid) - 1 or grid[j + 1][i] == 0:\n count += 1\n if i == len(r) - 1 or grid[j][i + 1] == 0:\n count += 1\n return count", "def getNumTiles(self):\n return self.w * self.h", "def island_perimeter(grid):\n\n counter = 0\n for i in range(len(grid)):\n for j in range(len(grid[i])):\n if (grid[i][j] == 1):\n if ((j + 1) == len(grid[i]) or (grid[i][j + 1] == 0)):\n counter += 1\n if ((j - 1) < 0 or (grid[i][j - 1] == 0)):\n counter += 1\n if ((i + 1) == len(grid) or (grid[i + 1][j] == 0)):\n counter += 1\n if ((i - 1) < 0 or (grid[i - 1][j] == 0)):\n counter += 1\n return counter", "def numIslands3(self, grid: List[List[str]]) -> int:\n m = len(grid)\n if m > 0:\n n = len(grid[0])\n else:\n return 0\n\n def dfs(grid, i, j):\n if grid[i][j] != '0':\n grid[i][j] = '0'\n\n for direction in self.directions(grid, i, j):\n dfs(grid, direction[0], direction[1])\n\n island = 0\n for i in range(m):\n for j in range(n):\n if grid[i][j] == '1':\n island += 1 # count the number of CCs\n dfs(grid, i, j)\n return island", "def island_perimeter(grid):\n count = 0\n for row in grid:\n size = len(row)\n row.insert(0, 0)\n row.append(0)\n grid.insert(0, [0 for x in range(size + 2)])\n grid.append([0 for x in range(size + 2)])\n\n for e, row in enumerate(grid):\n for i, num in enumerate(row):\n if num == 1:\n if grid[e][i - 1] != 1:\n count += 1\n if grid[e][i + 1] != 1:\n count += 1\n if grid[e - 1][i] != 1:\n count += 1\n if grid[e + 1][i] != 1:\n count += 1\n return count", "def grid_point_counts(self):\n return [high-low for low, high in self._Limits]", "def getNumTiles(self):\n return (self.width) * (self.height)", "def island_perimeter(grid):\n total = 0\n for x in range(0, len(grid)):\n for y in range(0, len(grid[0])):\n if grid[x][y] == 1:\n if x == 0 or grid[x - 1][y] == 0:\n total += 1\n if x == len(grid) - 1 or grid[x + 1][y] == 0:\n total += 1\n if y == len(grid[0]) - 1 or grid[x][y + 1] == 0:\n total += 1\n if y == 0 or grid[x][y - 1] == 0:\n total += 1\n return total", "def getNumTiles(self):\n return self.height * self.width", "def island_perimeter(grid):\n c = 0\n length = len(grid) - 1\n width = len(grid[0]) - 1\n\n for i, r in 
enumerate(grid):\n for j, n in enumerate(r):\n if n == 1:\n if i == 0 or grid[i - 1][j] != 1:\n c += 1\n if j == 0 or grid[i][j - 1] != 1:\n c += 1\n if j == width or grid[i][j + 1] != 1:\n c += 1\n if i == length or grid[i + 1][j] != 1:\n c += 1\n return c", "def num_nodes(self):\n return self._grid", "def island_perimeter(grid):\n\n count = 0\n\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n mul = 4\n if grid[i][j] == 1:\n if j < len(grid[0]) - 1:\n if grid[i][j + 1] == 1:\n mul -= 1\n if grid[i][j - 1] == 1 and j > 0:\n mul -= 1\n if i < len(grid) - 1:\n if grid[i + 1][j] == 1:\n mul -= 1\n if grid[i - 1][j] == 1 and i > 0:\n mul -= 1\n else:\n continue\n count += mul\n return count", "def num_cells_up(self):\n if hasattr(self, '__num_cells_up__'):\n return self.__num_cells_up__\n elif self.shared_coboundaries is not None:\n assert self.upper_index is not None\n return int(self.shared_coboundaries.max()) + 1\n assert self.upper_index is None\n return 0", "def get_grid_width(puzzle: str) -> int:\r\n return int(len(puzzle) ** (1 / 2))", "def test_can_traverse_wide_grid(self):\n grid = [[\"1\", \"0\", \"1\", \"1\", \"0\", \"1\", \"0\", \"0\", \"1\", \"0\"]]\n result = num_islands(grid)\n self.assertEqual(result, 4)", "def test_can_traverse_tall_grid(self):\n grid = [\n [\"0\"],\n [\"1\"],\n [\"1\"],\n [\"0\"],\n [\"1\"],\n [\"0\"],\n [\"1\"],\n [\"0\"],\n [\"1\"],\n ]\n result = num_islands(grid)\n self.assertEqual(result, 4)", "def ncells(self):\n return self.izone.size", "def number_of_carnivores_island(self):\n return np.sum(self.carnivores_on_island)", "def voxel_count(self):\n return self.cols * self.rows * self.sections", "def rectangles_in_grid(x_f, y_f):\n count = 0\n for x in range(x_f):\n for y in range(y_f):\n for i in range(x, x_f):\n for j in range(y, y_f):\n count += 1\n return count", "def _count_adj_occupied(grid: List[List[str]], row: int, col: int) -> int:\n count = 0\n if row - 1 >= 0:\n if col - 1 >= 0:\n count += 1 if grid[row - 1][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row - 1][col + 1] == '#' else 0\n count += 1 if grid[row - 1][col] == '#' else 0\n if row + 1 < len(grid):\n if col - 1 >= 0:\n count += 1 if grid[row + 1][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row + 1][col + 1] == '#' else 0\n count += 1 if grid[row + 1][col] == '#' else 0\n if col - 1 >= 0:\n count += 1 if grid[row][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row][col + 1] == '#' else 0\n return count", "def recursive_grid_count(x, y):\n if x < 1 or y < 1:\n raise ValueError(\"Invalid input\")\n if x == 1 and y == 1:\n return 1\n if x == 1:\n return recursive_grid_count(x, y-1) + y\n if y == 1:\n return recursive_grid_count(x-1, y) + x\n return recursive_grid_count(x-1, y) + recursive_grid_count(x, y-1) - recursive_grid_count(x-1, y-1) + x * y", "def island_perimeter(grid):\n total = 0\n for b in range(len(grid)):\n for a in range(len(grid[b])):\n # left corner\n if (a == 0) and (b == 0):\n if grid[b][a] == 1:\n total = total + 2\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n # right corner\n elif (a == len(grid[b]) - 1) and b == 0:\n if grid[b][a] == 1:\n total = total + 2\n if grid[b + 1][a] == 0:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n # lower-left corner\n elif a == 0 and b == (len(grid) - 1):\n if grid[b][a] == 1:\n total = total + 2\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b - 1][a] == 0:\n total 
= total + 1\n # lower-right corner\n elif b == (len(grid) - 1) and a == (len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 2\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n # top edge\n elif (b == 0 and a > 0) and a < (len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n if grid[b][a + 1] == 0:\n total = total + 1\n # left edge\n elif (b > 0 and b < (len(grid) - 1)) and ((a == 0) and a <\n len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n # right edge\n elif (b > 0 and (b < len(grid) - 1)) and (a == len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n # bottom edge\n elif (b == len(grid) - 1) and a > 0 and a < len(grid[b]) - 1:\n if grid[b][a] == 1:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a + 1] == 0:\n total = total + 1\n # cases that are neither edges nor corners\n elif (b > 0 and b < len(grid) - 1) and (a > 0 and a <\n len(grid[b]) - 1):\n if grid[b][a] == 1:\n if grid[b][a - 1] == 0:\n total = total + 1\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n return total" ]
[ "0.72868717", "0.72172505", "0.71965694", "0.7155208", "0.70913154", "0.7022597", "0.6997481", "0.6893928", "0.68832994", "0.6877992", "0.6820697", "0.68062395", "0.6802767", "0.679377", "0.6784351", "0.6742068", "0.67137444", "0.6681955", "0.66590667", "0.6619721", "0.6619455", "0.660445", "0.6597818", "0.6597744", "0.65953463", "0.6592812", "0.65905315", "0.65870744", "0.6576417", "0.6556205" ]
0.74237925
0
Const method for initializing the applet
def init(self): # Configuration interface support comes with plasma self.setHasConfigurationInterface(False) # Aspect ratio defined in Plasma self.setAspectRatioMode(Plasma.IgnoreAspectRatio) # Theme is a const variable holds Applet Theme self.theme = Plasma.Svg(self) # It gets default plasma theme's background self.theme.setImagePath("widgets/background") # Resize current theme as applet size self.theme.resize(self.size()) self.mainWidget = None self.layout = None self.initPlasmoid()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init():", "def init():\n pass", "def do_init(self):\n\n pass", "def Init(self, config):\r\n pass", "def initialize(self, application):", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def initialize(self, *a, **kw):\n webapp2.RequestHandler.initialize(self, *a, **kw)", "def init(self) -> None:", "def initialize(self, args):\n\t\tpass", "def init(self) -> None:\n ...", "def __init__(self):\n\n self.logger = utils.get_logger()\n\n # set constants\n constants = models.get_asset_dicts('preferences')\n for key, value in constants.items():\n setattr(self, key, value)", "def init():\n safe_call(backend.get().af_init())", "def initialize(self):\n\t\tpass", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def __init__(self):\n self._ll = LowLevelLibs()\n self._lib = self._ll.ratchet", "def __init__(self):\n self._ll = LowLevelLibs()\n self._lib = self._ll.ratchet", "def initialise(self):", "def initialization_call(self) -> global___Snippet.ClientCall:", "def initialization_call(self) -> global___Snippet.ClientCall:", "def initialization_call(self) -> global___Snippet.ClientCall:", "def initialize(self):\r\n pass" ]
[ "0.6560639", "0.64750546", "0.63915706", "0.62873834", "0.62607515", "0.621026", "0.621026", "0.621026", "0.621026", "0.621026", "0.621026", "0.621026", "0.621026", "0.619076", "0.6180176", "0.6179248", "0.61707896", "0.6163452", "0.6160528", "0.6155465", "0.6152029", "0.6152029", "0.6152029", "0.6137354", "0.6137354", "0.6106597", "0.6103727", "0.6103727", "0.6103727", "0.6078534" ]
0.7120899
0
Adds a data point to the logger object. Datapoints are added sequentially, so add your variables in the same sequence that you want them to show up in on the CSV
def addDataPoint(self, variableName): if self.initialized == False: if str(variableName) in self.currentLog: raise IndexError("datapoiont already initialized") else: self.variables += 1 self.variableDescriptions.append(variableName) self.currentLog[variableName] = None else: raise IndexError("file already initialized!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recordVariable(self, variableName, data):\n if str(variableName) in self.currentLog:\n # if self.currentLog[str(variableName)] != None:\n # raise Warning(f'data point {str(variableName)} is being overwritten!')\n self.currentLog[str(variableName)] = data\n else:\n raise IndexError(\"datapoint not initialized\")", "def _add_log_data(self, data):\n self.solver._notify_new_log(data)\n if self.log_enabled:\n if self.log_print:\n write_checking_unicode_errors(self.log_output, data)\n self.log_output.flush()\n if self.log_data is not None:\n self.log_data.append(data)\n # Update statistics\n self.process_infos.incr(CpoProcessInfos.TOTAL_LOG_DATA_SIZE, len(data))", "def addDataPoints(self):\n pass", "def record(self, point):\n for var, value in zip(self.varnames, self.f(point)):\n self.samples[var].append(value)\n return self", "def _record(self):\r\n self._plot()\r\n self._csvWriter()\r\n self._logger()", "def addPHdata(flight_data):\n\n global flight_data_log\n\n flight_data_log[flight_data['timestamp']] = flight_data", "def addDataPoint(self, index, row):\n\t\tregex = re.compile('^[a-zA-Z]+')\n\n\t\t# If there's a header\n\t\tif regex.match(row[0]) and self.headerSize > 0:\n\t\t\tprint \"Error reading line\",lineIndex\n\t\telif regex.match(row[0]) and self.headerSize == 0:\n\t\t\tself.header = row\n\t\t\tself.headerSize = len(self.header)\n\t\telse:\n\t\t\tdp = DataPoint(row, self.header)\n\t\t\tself.dataPoints.append(dp)", "def add_log(self, log):\n try:\n if log.name in self.existing_logs:\n raise Exception(\"A log with the name already exists\")\n # if len(log) == 0:\n if not log:\n raise Exception(\"No valid data in log\")\n if self.__len__() < len(log):\n raise Exception(\"length does not match\")\n # add new row to curves table\n with sqlite3.connect(self.db_file) as conn:\n cur = conn.cursor()\n cur.execute(\"SELECT COUNT(*) FROM curves\")\n index = cur.fetchone()[0] + 1\n curvesTuple = (index, log.name, log.units, log.descr)\n cur.execute(\"INSERT INTO curves VALUES (?, ?, ?, ?)\",\n curvesTuple)\n # add new column to data table\n with sqlite3.connect(self.db_file) as conn:\n cur = conn.cursor()\n cur.execute(\"ALTER TABLE data \\\n ADD COLUMN {} REAL\".format(log.name.lower()))\n dataList = [(a,) for a in log.data]\n for de, da in zip(log.depth, dataList):\n cur.execute(\"UPDATE data \\\n SET {} = ?\\\n WHERE dept = {}\".format(\n log.name.lower(), de), da)\n except Exception as inst:\n print(inst.args[0])", "def add_metrics_point(self, data_points: Dict[str, float], timestamp: float):\n for name, value in data_points.items():\n # Using in-sort to insert while maintaining sorted ordering.\n bisect.insort(a=self.data[name], x=TimeStampedValue(timestamp, value))", "def data_point(inputs: list):\n \n opv = '1'\n \n sample_id = 0\n \n timenow = strftime(\"%#m/%#d/%Y %#H:%M\")\n volts = inputs[0]\n current = inputs[1]\n power = inputs[2]\n \n data_point = [opv, sample_id, timenow, volts, current, power]\n\n if data_point == True:\n sample_id += 1\n \n return data_point", "def addDataPoints(self, ticker, buyDate, sellDate):\n\t\t# Gets the CSV giving the stock data\n\t\tcsvList = self.getCSV(ticker, buyDate, sellDate)\n\n\t\t# Parse this list and add the data points\n\t\tfor index, csv in enumerate(csvList):\n\t\t\tself.addDataPoint(index, csv)\n\n\t\t# Sort the data points into date order\n\t\tself.dataPoints.sort(key=lambda dp: dp.getDate(), reverse=False)", "def add_point(self, point, fill_auto_fields=True, timestamp=None):\n self.points.append(point)\n if fill_auto_fields:\n 
self.fill_auto_fields(point)\n if timestamp:\n point.timestamp = timestamp", "def append_to_csv(self):\n appended_data = pd.concat([self.existing_data, self.new_data], axis = 1)\n appended_data.to_csv(filename_main, index = False)\n warnings.warn(\"Add new graphs to .vsz files to show the new data\")", "def add(self, data):\n if data[\"topic\"] in DRONE_POS_TOPICS:\n self.drone.set_pos_val(data[\"ts\"], data[\"coord\"], data[\"value\"])\n elif data[\"topic\"] in DRONE_VEL_TOPICS:\n self.drone.set_vel_val(data[\"ts\"], data[\"coord\"], data[\"value\"])\n elif data[\"topic\"] in DRONE_ACC_TOPICS:\n self.drone.set_acc_val(data[\"ts\"], data[\"coord\"], data[\"value\"])\n elif data[\"topic\"] in SUBJECT_TOPICS:\n self.subject.set_val(data[\"ts\"], data[\"coord\"], data[\"value\"])\n elif data[\"topic\"] in self.PEDESTRIAN_TOPICS:\n self.peds[data[\"pid\"]].set_val(data[\"ts\"], data[\"coord\"], data[\"value\"])", "def addDataPoint(self, dataPoint):\r\n ## Append new data point(s) to end of array\r\n self._data = np.insert(self._data, self._data.size, dataPoint)\r\n ## Trim begining begining of array if longer than maxSize\r\n if self._data.size > self._maxSize:\r\n self._data = self._data[self._data.size - self._maxSize:]", "def _append_value(self, stream, value):\n if FLAGS.timestamp:\n x_val = float(time.time())\n stream['x'].append(x_val)\n\n y_val = float(value)\n stream['y'].append(y_val)", "def addData(self, positionData, timeStamp):\n newPos = IndividualPosition(positionData, timeStamp)\n self.positions.append(newPos)\n self.predictedXAcceleration, self.predictedYAcceleration = self.predictParams()\n newPos.car.update_predictions(self.predictedXAcceleration, self.predictedYAcceleration)\n self.latestCar = newPos.car\n self.latestTime = timeStamp", "def add_record(self, data):\n if not self._validate_columns(data):\n raise ValueError('Invalid column names')\n formatted_data = [str(data[column]) for column in self.column_names]\n utils.write_line(','.join(formatted_data) + '\\n', self.filename, 'a')", "def append(self, *data):\n super(TextDataWriter, self).append(*data)\n dline = []\n for c, d in zip(self.column_descriptions, data):\n if is_sequence(d):\n for x in d:\n dline.append(c.format(x))\n else:\n dline.append(c.format(d))\n self.fo.write(self.separator.join(dline))\n self.fo.write('\\n')", "def add_datum(self, x, fields):\n\t\n\t\tfor name, value in fields.iteritems():\n\t\t\tif name not in self.curves:\n\t\t\t\tcurve = QwtPlotCurve()\n\t\t\t\tcurve.attach(self)\n\t\t\t\tself.curves[name] = [curve, [], []]\n\t\t\t\n\t\t\tstuff = self.curves[name]\n\t\t\tstuff[1].append(x)\n\t\t\tstuff[2].append(value)", "def InsertLog():", "def record_data(self, time, x, tau):\n\n self.t_values.append(np.copy(time))\n self.x_values.append(np.copy(x))\n self.tau_values.append(np.copy(tau))", "def _insert_datapoint(self):\n # Insert\n if db_datapoint.idx_datapoint_exists(1) is False:\n record = Datapoint(\n id_datapoint=general.encode(self.reserved),\n agent_label=general.encode(self.reserved),\n agent_source=general.encode(self.reserved)\n )\n database = db.Database()\n database.add(record, 1047)", "def add_entry(self, timestamp, data):\n self._normalized = self._predefinedNormalized\n self._sorted = self._predefinedSorted\n\n tsformat = self._timestampFormat\n if tsformat is not None:\n timestamp = TimeSeries.convert_timestamp_to_epoch(timestamp, tsformat)\n\n self._timeseriesData.append([float(timestamp), float(data)])", "def add_to_dataset(self, dataset: Dataset):\n pass", "def 
add_data_single(self, pt, val):\n self.gp_core.add_data_single(pt, val)", "def addPoint(self, *args, **kwargs):\n ...", "def add_data(self, label, description='', datapath='', samples=[], fibres=[], data_type='', date_created='', verbose = True):\n assert (self.connected)\n assert(type(label) == str)\n assert(type(datapath) == str)\n assert(type(samples) == list and len(samples) <= 4)\n assert(type(fibres) == list and len(fibres) <= 2)\n assert(type(date_created) == str)\n assert('\\n' not in label)\n assert(len(samples) <= 4)\n assert(len(fibres) <= 2)\n \n \n ADD_DATA_COMMAND = (\"INSERT INTO data \"\n \"(label,description, type, data, data_size, data_duration, data_numpoints, sampleId, sampleId2, sampleId3, sampleId4, fibreId, fibreId2, date_created) \"\n \"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\")\n \n # get binary data from the file path specified\n data = None\n data_size = 0\n num_data_points = 0\n duration = 0 \n if (datapath != ''):\n data = open(datapath, 'rb').read()\n data_size = path.getsize(datapath)\n if verbose: print(\"File uploaded: \" + str(data_size / 1000.0) + \" KB\")\n \n # get metadata from .csv file\n df = pd.read_csv(datapath)\n num_data_points = len(df) \n if (len(df) > 0):\n if ('time' in df):\n duration = df['time'].values[len(df)-1] - df['time'].values[0] \n elif ('Time' in df): \n duration = df['Time'].values[len(df)-1] - df['Time'].values[0]\n else:\n duration = -1 \n \n # fill in today's date,if none was given\n if (date_created == ''):\n date_created = date.today().strftime(\"%Y-%m-%d\")\n \n # Get sample ids \n sampleIds = []\n if (len(samples)>0 and type(samples[0]) == str):\n for s in samples:\n theId = self.get_by_label(s, 'samples')\n sampleIds.append(None if theId==-1 else theId )\n elif (len(samples)>0 and type(samples[0]) == int):\n sampleIds = samples\n # Ensure sample id list if exactly 4 items long\n sampleIds = [ sampleIds[i] if i<len(sampleIds) else None for i in range(4)]\n \n \n # get fibre ids\n fibreIds = []\n if (len(fibres)>0 and type(fibres[0]) == str):\n for f in fibres:\n theId = self.get_by_label(f, 'fibres')\n fibreIds.append(None if theId==-1 else theId )\n if (len(fibres)>0 and type(fibres[0]) == int):\n fibreIds = fibres\n # Ensure fibre id list if exactly 2 items long\n fibreIds = [ fibreIds[i] if i<len(fibreIds) else None for i in range(2)]\n \n \n new_data = (label, description, data_type, data, data_size, duration, num_data_points, sampleIds[0], sampleIds[1], sampleIds[2], sampleIds[3], fibreIds[0], fibreIds[1], date_created)\n \n \n \n self.cursor.execute(ADD_DATA_COMMAND, new_data)\n \n self.cnx.commit()\n \n \n if verbose: print(\"Data added successfully\")", "def appendPoint(self, point):\n self.points.append(point)", "def add_point(self, time=None, location=None):\n\n # calculate the bounds for time and location and create or update the bounds for the coordinate axis\n # hold onto the values so you can put them in an hdf...\n\n self._element_count.value += 1\n\n assert time, 'Can not create a point without a time value'\n\n assert location and len(location) == (len(self.coordinate_axis)-1), 'Must provide the correct number of location values'\n\n #@todo add some more type checking!\n\n self._coordinates[self.coordinate_axis[0]]['records'].append(time)\n\n for ind in xrange(len(location)):\n self._coordinates[self.coordinate_axis[ind+1]]['records'].append(location[ind])\n\n return self._element_count.value -1 # the actual index into the records list" ]
[ "0.6582924", "0.6551948", "0.6459278", "0.6246846", "0.6219666", "0.6097625", "0.60864437", "0.5971872", "0.59315133", "0.58834726", "0.5853392", "0.5843727", "0.5807153", "0.57972455", "0.57885456", "0.5764661", "0.5758678", "0.5758131", "0.5752304", "0.57509875", "0.57498115", "0.57447714", "0.5741171", "0.57234454", "0.5722398", "0.5716958", "0.5696913", "0.56462586", "0.5613221", "0.56128323" ]
0.70670587
0
records a variable to the current log, DOES NOT LOG AUTOMATICALLY
def recordVariable(self, variableName, data): if str(variableName) in self.currentLog: # if self.currentLog[str(variableName)] != None: # raise Warning(f'data point {str(variableName)} is being overwritten!') self.currentLog[str(variableName)] = data else: raise IndexError("datapoint not initialized")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_example(var):\n\n log.info('example code started')\n log.debug('calling settings')\n test_settings()\n log2.error('there is no error this is example ')\n log2.info('finished')", "def log_debug(var):\n\n GPS.Logger('testsuite').log(\"%s\" % (var, ))", "def logger(self, value):\n pass", "def __log__(self, val):\n if lm_settings[\"debug\"]:\n try:\n log_file = open(\"language_manager/info/language_manager.log\", \"a\")\n except FileNotFoundError:\n log_file = open(lm_settings[\"logfile\"], \"w\")\n log_file.write(val)\n log_file.close()", "def log(self, message):", "def record(*args, **kwargs):\n LOG.info(\"args={}, kwargs={}\".format(args, kwargs))", "def record(self, var_keys, value=None):\n\n for var_key in make_list(var_keys):\n\n # Create empty lists\n if 't' not in self.log:\n self.log['t'] = []\n if var_key not in self.log:\n self.log[var_key] = [None] * len(self.log['t'])\n\n if self.model.t not in self.log['t']:\n\n # Create empty slot for new documented time step\n for v in self.log.values():\n v.append(None)\n\n # Store time step\n self.log['t'][-1] = self.model.t\n\n if value is None:\n v = getattr(self, var_key)\n else:\n v = value\n\n self.log[var_key][-1] = v", "def record_vars(context, data):\n pass", "def log(self, message: str):", "def log(msg):\n\n print('datastore: %s' % msg)", "def on_a(self):\r\n self.log()", "def _add_recorder(self, variable):\n raise NotImplementedError", "def log(msg):\n print msg", "def log(self, msg):\n print(msg)", "def save_result(self, value: Any) -> None:\n self.run_logger.set_tags({self.name: value})", "def log(self, message):\n if VERBOSE:\n print self, message", "def _log(self, str):\n if self.log:\n print(str)", "def record(self, step):", "def _log_some_info(self):\n logging.info('info')", "def log_trainable_variables(self):\n var_names = list(self.trainable_variables.keys())\n self.logger.log_trainable_variables(var_names)", "def store(status, message, **kwargs):\r\n LogRecord(status, message, **kwargs)", "def logger(self, value):\n self._state[\"logger\"] = value", "def _log(self, data):\n if self.log_data is not None:\n self.log_data(data)", "def _log(self, message):\n pass", "def log(self, game: str, outcome: str):\n current_time = datetime.now()\n self.user.record.append([current_time.strftime(\"%c\"), game, outcome, self.user.balance])", "def logIt(self, msg):\n\n\t\tif( self.logger ): self.logger.logIt( msg )", "def log(message):\n if LOGPLEASE:\n logging.info(message)", "async def add_log(self, value):\n log_string = value\n print(log_string)\n self.embed.title = log_string\n self.embed.timestamp = datetime.datetime.now()\n self.embed.description = \"\"", "def logline(msg):\n print msg", "def log(message):\n print(\"{0}: {1}\".format(acm.Time.TimeNow(), message))" ]
[ "0.6800775", "0.67926866", "0.67613137", "0.673412", "0.66907656", "0.6625186", "0.6596223", "0.6494363", "0.6444782", "0.6422716", "0.64064705", "0.6392706", "0.6374291", "0.6330466", "0.63255125", "0.6305311", "0.6288462", "0.62827843", "0.6263191", "0.6250232", "0.6246383", "0.6221026", "0.6168215", "0.61613595", "0.6140027", "0.61336285", "0.6100201", "0.6099701", "0.6099278", "0.60853183" ]
0.70800555
0
Initializes the CSV file and prepares it for writing.
def initCSV(self, makeFile, overWrite): self.initialized = True os.chdir(os.path.dirname(os.path.abspath(__file__))) if os.path.exists(str(self.fileName)): f = open(str(self.fileName), "r") if not f.read(): f.close() f = open(str(self.fileName), "w") outString = "" for varName in self.variableDescriptions: outString += varName outString += "," f.write(outString[0:-1]) f.write('\n') else: if overWrite == True: f.close() f = open(str(self.fileName), "w") outString = "" for varName in self.variableDescriptions: outString += varName outString += "," f.write(outString[0:-1]) f.write('\n') if overWrite == False: raise OSError("csv file is not empty!") else: if makeFile == True: f = open(str(self.fileName), "w") f.close() else: raise OSError("csv file not found!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __open_csv(self):\n self.__csv_file = open(self.__csv_file_name, 'w', encoding='utf-8')\n self.__csv_writer = csv.writer(self.__csv_file, delimiter=',', )", "def init_csv_file(self):\n folder = \"/home/pi/data/\" + datetime.now().strftime(\"%Y_%m_%d\") + \"/\"\n if not os.path.isdir(folder):\n # append 'a' to the folder name until we find a name that does not exist\n while os.path.exists(folder):\n folder = folder[:-1] + \"a\" + \"/\"\n os.mkdir(folder)\n filename = folder + 'particledata_' + datetime.now().strftime (\"%H-%M-%S\") \n while os.path.exists(filename):\n filename = filename + '_a'\n filename += '.csv'\n log.info('Writing data to: ' + filename)\n self.file = open(filename, \"w\")\n self.file.write('Unix Time;Human Readable Time;pm 2.5;pm 10;Has Fix;Longitude;Latitude;Altitude;GPS Unix Time\\n')\n self.file.flush()\n self.synced_time = False", "def init_csv_file(csv_path):\n with open(csv_path, 'w', newline='') as csv_file:\n writer = csv.writer(csv_file)\n header = ['file_name', 'chart_in_file',\n 'year', 'month', 'row_no', 'bird_species']\n header += list(range(1, 32))\n writer.writerow(header)", "def __create_csv(self):\n with open(self.__csv_file_name, 'w', newline='', encoding='utf-8') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=self.__csv_fields, delimiter=';')\n writer.writeheader()", "def setup_csv(self) -> None:\n csvData = ['Followers', 'Time']\n\n # Create our CSV file header\n with open(self.graphfile, 'w') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow(csvData)\n csvFile.close()", "def initialize():\n if not os.path.isfile(WORK_LOG_FILENAME):\n with open(WORK_LOG_FILENAME, 'a', newline='') as work_log:\n work_log_writer = csv.DictWriter(work_log, fieldnames=FIELDNAMES)\n work_log_writer.writeheader()", "def __openAndInitCSVFile(self, modelInfo):\n # Get the base path and figure out the path of the report file.\n basePath = self.__outputDirAbsPath\n\n # Form the name of the output csv file that will contain all the results\n reportCSVName = \"%s_Report.csv\" % (self.__outputLabel,)\n reportCSVPath = self.__reportCSVPath = os.path.join(basePath, reportCSVName)\n\n # If a report CSV file already exists, back it up\n backupCSVPath = None\n if os.path.exists(reportCSVPath):\n backupCSVPath = self.__backupCSVPath = _backupFile(reportCSVPath)\n\n\n # Open report file\n if self.__replaceReport:\n mode = \"w\"\n else:\n mode = \"a\"\n csv = self.__csvFileObj = open(reportCSVPath, mode)\n\n # If we are appending, add some blank line separators\n if not self.__replaceReport and backupCSVPath:\n print >> csv\n print >> csv\n\n # Print the column names\n print >> csv, \"jobID, \",\n print >> csv, \"modelID, \",\n print >> csv, \"status, \" ,\n print >> csv, \"completionReason, \",\n print >> csv, \"startTime, \",\n print >> csv, \"endTime, \",\n print >> csv, \"runtime(s), \" ,\n print >> csv, \"expDesc, \",\n print >> csv, \"numRecords, \",\n\n for key in self.__sortedVariableNames:\n print >> csv, \"%s, \" % key,\n for key in self.__sortedMetricsKeys:\n print >> csv, \"%s, \" % key,\n print >> csv", "def create_csv_file(self):\r\n # Create a new csv-file\r\n with open(self.fname, 'w') as f:\r\n writer = csv.writer(f, dialect='excel')\r\n writer.writerow(['set_time',\r\n 'read_time_P_ac',\r\n 'read_time_P_bat',\r\n 'soc',\r\n 'set_value',\r\n 'P_ac',\r\n 'P_bat'])", "def __init__(self, in_csvfile, out_csvfile, col_name, cell_filler):\r\n self.in_csvfile = in_csvfile\r\n self.out_csvfile = out_csvfile\r\n self.col_name = col_name\r\n 
self.cell_filler = cell_filler", "def init_csv(input_path, config_file, quiet):\n\n if not config_file:\n config_file = getConfigPath(input_path)\n\n if not os.path.exists(config_file) or quiet:\n configHandler(config_file).resetConfig()\n click.secho('\\n{} didn\\'t exist and has been created'.format(\n config_file), fg='green')\n\n csv_file = getCsvPath(input_path)\n if not os.path.exists(csv_file) or quiet:\n confirm_overwrite = True\n else:\n confirm_overwrite = click.confirm(\n '{} already exists. Do you want to overwrite it?'.format(csv_file))\n\n if confirm_overwrite:\n if not os.path.exists(config_file):\n configHandler(config_file).resetConfig()\n csvHandler(csv_file).resetCSV(config_file=config_file)\n click.secho('{} created'.format(csv_file), fg='green')", "def __init__(self, csvfile, fieldnames, *args, **kwargs):\n self.encoding = kwargs.pop('encoding', 'utf-8')\n csv.DictWriter.__init__(self, csvfile, fieldnames, *args, **kwargs)", "def _maybe_init(self):\n if not self._initialized:\n progress_file = os.path.join(self.logdir, EXPR_PROGRESS_FILE)\n self._continuing = (\n os.path.exists(progress_file) and os.path.getsize(progress_file) > 0\n )\n self._file = open(progress_file, \"a\")\n self._csv_out = None\n self._initialized = True", "def __init__(self, csv_file: str = None) -> None:\n super().__init__(csv_file)", "def __init__(self, csv_file: str = None) -> None:\n super().__init__(csv_file)", "def initialize_headers_file(param_headers, csv_file):\n\n csv_writer = csv.writer(csv_file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n csv_writer.writerow(param_headers)\n return;", "def __init__(self, path):\n self.csv_path = path\n # check if csv format is valid or not\n self.check_valid_csvformat(self.csv_path)\n \"\"\" empty dict to store all company names\n prepare initial company data in dictionary format \"\"\"\n self.company_data = dict()", "def init(self, force_overwrite=False):\n if not force_overwrite and os.path.exists(self._path):\n msg = 'Path `{0}` already exists'.format(self._path)\n raise RuntimeError(msg)\n\n try:\n self._archive_file = open(self._path, 'w+') # noqa: WPS515\n except OSError as ex1:\n msg = 'Could not open path `{0}` for writing: {1}'.format(\n self._path, ex1,\n )\n raise RuntimeError(msg)\n\n field_names = [\n 'timestamp',\n EventType.monitoring_started.name,\n EventType.monitoring_ended.name,\n EventType.person_entered.name,\n EventType.person_left.name,\n ]\n try:\n self._writer = csv.DictWriter(\n self._archive_file, fieldnames=field_names,\n )\n except csv.Error as ex2:\n msg = 'Error creating csv.writer: {0}'.format(ex2)\n raise RuntimeError(msg)", "def __init__(self, writer):\n super(ThreatCrowdCsvWriter, self).__init__(writer)", "def initialize_headers_filepath(param_headers, filepath):\n\n with open(filepath, 'w') as csv_file:\n initialize_headers_file(param_headers, csv_file)\n return;", "def start_rec(self):\r\n if not os.path.exists(MEASUREMENTS_DIR):\r\n os.makedirs(MEASUREMENTS_DIR)\r\n self._csv_file = open(MEASUREMENTS_DIR.child('data.csv'), 'wb')\r\n self._csv_writer = csv.DictWriter(\r\n self._csv_file, fieldnames=list(self._data.keys()))\r\n self._csv_writer.writeheader()", "def init():\n global last_datetime\n global data_file\n global last_minute\n\n # Atualiza as variáveis 'last_datetime' e 'last_minute'\n last_datetime = datetime.datetime.today()\n last_minute = last_datetime.minute\n\n # Define o diretório do arquivo\n data_file_path = 'data/' + get_date(last_datetime)\n\n # Gera o diretório\n try:\n 
os.makedirs(data_file_path)\n\n except FileExistsError:\n pass\n\n # Arbre o arquivo de dados\n data_file = open(\n data_file_path + '/' + get_time(last_datetime) + '.csv', 'w'\n )", "def __init__(self, trace, directory):\n #Public attributes\n self._trace = trace\n self._file_name = directory + '/trace.csv'\n \n #Private attributes\n self._writer = None", "def __init__(self, product_name, title, url, csv_file_name):\n # this dictionary will be used to save data in csv file\n self.__values = {'product_name': product_name, 'title': title, 'url': url}\n # __csv_fields make save_data() method writes correctly in csv file.\n self.__csv_fields = self.__values.keys()\n self.__csv_file_name = csv_file_name", "def __init__(self, *, csv_file_path: str = ''):\n self.__csv_file_path = csv_file_path\n self._parse_csv()", "def __close_csv(self):\n if self.__csv_file is not None:\n try:\n self.__csv_file.close()\n except IOError:\n pass\n self.__csv_file = None", "def __init__(self, filename, column_names, overwrite=False):\n self.filename = filename\n self.column_names = column_names\n\n if os.path.exists(self.filename) and overwrite is False:\n logging.info('Appending data to file {}'.format(self.filename))\n self._validate_header(utils.read_line(self.filename),\n self.column_names)\n else:\n utils.write_line(','.join(column_names) + '\\n', self.filename, 'w')", "def __init__(self, path=None):\n super().__init__(path=path)\n self.path += '{}.csv'", "def exportCSV(self, log, csvFile):\n return 0", "def _write(self):\n # Reload\n with portalocker.Lock(self.filename, 'w') as fh:\n self.data.to_csv(fh, index=False)\n fh.flush()\n os.fsync(fh.fileno())", "def __init__(self, csv_path, column_types=None, set_columns=False, file_headers=True, encoding=\"utf-8-sig\",\n missing_to_zero=False, print_warnings=True):\n\n self.file_path = Path(csv_path)\n self.file_name = self.file_path.stem\n\n self._file_headings = file_headers\n self._encoding = encoding\n\n self.headers = self._extract_headers()\n self.row_length = len(self.headers)\n\n self.missing_to_zero = missing_to_zero\n self.print_warnings = print_warnings\n self.invalid_typed = []\n\n self.column_types = self._determine_column_types(column_types)\n self.row_data, self.column_data, self.column_length = self._set_data(set_columns)\n\n # Old definitions kept for legacy, but new names added for clarity\n self.num_cols = self.row_length\n self.num_rows = self.column_length\n\n if len(self.invalid_typed) > 0 and self.print_warnings:\n print(f\"Warning: The following column-row-value-type where not correct so loaded as strings:\\n\"\n f\"{sorted(self.invalid_typed)}\")" ]
[ "0.76741135", "0.74255097", "0.7329993", "0.7243945", "0.7048726", "0.7034279", "0.6956366", "0.673258", "0.67240673", "0.6676092", "0.6603313", "0.6579222", "0.65732867", "0.65732867", "0.6540106", "0.6471683", "0.6458733", "0.64487153", "0.64303035", "0.6368172", "0.63498974", "0.6336909", "0.6267665", "0.62241864", "0.6211363", "0.6167199", "0.6155283", "0.61397797", "0.6120292", "0.61114293" ]
0.78837293
0
Test stripping the line
def test_line_strip(): for _x in range(100): l_str = " ".join([random_str(5, 10) for x in range(30)]) l_str = (" " * randint(0, 10)) + l_str + (" " * randint(0, 10)) line = Line(l_str, random_str(10, 20), randint(1, 10000)) # Strip the string l_stripped = line.strip() assert l_stripped == l_str.strip() assert isinstance(l_stripped, Line) assert l_stripped.file == line.file assert l_stripped.number == line.number
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_line(line):\r\n if not line.strip():\r\n return False # if the last line is blank\r\n if line.startswith(\"#\"):\r\n return False # comment line\r\n if line.startswith(\" #\"):\r\n return False # comment line\r\n return line", "def rstrip_line(line):\n return line.rstrip()", "def stripline(line, stripboth=False):\n if stripboth:\n return line.lstrip().rstrip()\n return line.rstrip()", "def skip_line(line):\n return IGNORE_LINES.search(line) is not None", "def strip_line(line):\n line = line.strip()\n line = line.rstrip('\\n')\n line = line.rstrip('\\t')\n line = (line.split(\"//\"))[0]\n return line", "def IgnoreLine(self, str):\n if not str.strip(): return True\n else: return str.startswith('==') or str.startswith('**')", "def _strip_lines(lines):\n for line in lines:\n stripped = line.strip()\n if stripped:\n yield stripped", "def line_valid(line: str) -> bool:\n\n return line != ' ' and line != ''", "def is_skippable(line: str) -> bool:\n return len(line) == 0 or line[0] == ';'", "def dealFirstLine(line):\n\n print \"%s\" % (line.strip('\\n'))", "def filter_line(line:str) -> bool:\n fails = is_short_sentence(line, MIN_LINE_LENGTH)\n\n return not fails", "def test_strip_bad(self):\n # have to turn off check to get bad data in; no longer preserves case\n r = self.RNA(\"UCAGRYU\")\n r._data[0] = 31\n r._data[2] = 55\n self.assertEqual(r.strip_bad(), \"CGRYU\")", "def ignores_line(self, line):\n # Ignore empty lines stemming from only a line break.\n if not line.strip():\n # Yes, ignore the line if it's empty.\n return True\n # Either a `_SRE_Match` instance or `None`\n match = self._total_regex.search(line)\n return bool(match)", "def _rstrip(line, JUNK='\\n \\t'):\r\n\r\n i = len(line)\r\n while i > 0 and line[i-1] in JUNK:\r\n i -= 1\r\n return line[:i]", "def skip_if_emptystring(line):\n if line.isspace():\n return None\n else:\n return line", "def emptyline(self):", "def _test_line(\n self, line, manager_data=None\n ): # pylint: disable=too-many-branches # pragma: no cover\n\n if PyFunceble.CONFIGURATION[\"db_type\"] == \"json\" and manager_data is not None:\n autocontinue = AutoContinue(self.file, parent_process=False)\n inactive_db = InactiveDB(self.file)\n mining = Mining(self.file)\n else:\n # We use the previously initiated autocontinue instance.\n autocontinue = self.autocontinue\n\n # We use the previously initiated inactive database instance.\n inactive_db = self.inactive_db\n\n # We use the previously initiated mining instance.\n mining = self.mining\n\n # We remove cariage from the given line.\n line = line.strip()\n\n if not line or line[0] == \"#\":\n # We line is a comment line.\n\n # We return None, there is nothing to test.\n return None\n\n if Regex(line, self.regex_ignore, escape=False, return_data=False).match():\n # The line match our list of elemenet\n # to ignore.\n\n # We return None, there is nothing to test.\n return None\n\n # We format the line, it's the last\n # rush before starting to filter and test.\n subject = self._format_line(line)\n\n if (\n not PyFunceble.CONFIGURATION[\"local\"]\n and PyFunceble.Check(subject).is_reserved_ipv4()\n ):\n # * We are not testing for local components.\n # and\n # * The subject is a reserved IPv4.\n\n # We return None, there is nothing to test.\n return None\n\n if PyFunceble.CONFIGURATION[\"filter\"]:\n # We have to filter.\n\n if Regex(\n subject, PyFunceble.CONFIGURATION[\"filter\"], return_data=False\n ).match():\n # The line match the given filter.\n\n # We get the status of the current line.\n status = 
self.__process_test(subject)\n else:\n # The line does not match the given filter.\n\n # We return None.\n return None\n else:\n # We do not have to filter.\n\n # We get the status of the current line.\n status = self.__process_test(subject)\n\n # We add the line into the auto continue database.\n autocontinue.add(subject, status)\n\n if status.lower() in self.list_of_up_statuses:\n # The status is in the list of UP status.\n\n # We mine if necessary.\n mining.mine(subject, self.file_type)\n\n if subject in inactive_db:\n # The subject is in the inactive database.\n\n # We generate the suspicous file.\n Generate(\n subject, \"file_domain\", PyFunceble.STATUS[\"official\"][\"up\"]\n ).analytic_file(\"suspicious\")\n\n # And we remove the current subject from\n # the inactive database.\n inactive_db.remove(subject)\n else:\n # The status is not in the list of UP status.\n\n # We add the current subject into the\n # inactive database.\n inactive_db.add(subject, status)\n\n if (\n self.complements_test_started\n and PyFunceble.CONFIGURATION[\"db_type\"] == \"json\"\n ):\n # We started the test of the complements.\n\n if \"complements\" in autocontinue.database:\n # The complement index is present.\n\n while subject in autocontinue.database[\"complements\"]:\n # We loop untill the line is not present into the\n # database.\n\n # We remove the currently tested element.\n autocontinue.database[\"complements\"].remove(subject)\n\n # We save the current state.\n autocontinue.save()\n\n if manager_data is None:\n # We are not in a multiprocess environment.\n\n # We update the counters\n autocontinue.update_counters()\n\n # We process the autosaving if it is necessary.\n self.autosave.process(test_completed=False)\n elif PyFunceble.CONFIGURATION[\"db_type\"] == \"json\":\n # We are in a multiprocess environment.\n\n # We save everything we initiated into the server process\n manager_data.append(\n {\n \"autocontinue\": autocontinue.database,\n \"inactive_db\": inactive_db.database,\n \"mining\": mining.database,\n }\n )\n\n # We return None.\n return None", "def readline_strip(stream):\n assert hasattr(stream,\"read\")\n line = stream.readline()\n line = line.rstrip(\"\\n\")\n return line", "def test_read_strips(connection, reader, loop):\n reader.push(\" a b c | @#$ d \\n\")\n loop.run_until_complete(connection.connect())\n value = loop.run_until_complete(connection.read())\n assert value == \"a b c | @#$ d\"\n assert reader.has_read(\" a b c | @#$ d \\n\")", "def test_file_iterator_removes_all_whitespace(self):\n for line in file_iterator('example_module.py'):\n self.assertEqual(line, line.strip())", "def is_line(self): \n return False", "def test__clean_line():\n LINES = {\n \"One morn before me were three figures seen,\":\n \"One morn before me were three figures seen,\",\n \"And once—more came they by:-alas! wherefore?\":\n \"And once more came they by: alas! 
wherefore?\",\n }\n for line, clean_line in LINES.items():\n assert(LineBuilder(line)._clean_line() == clean_line)", "def test_file_iterator_removes_leading_whitespace(self):\n for line in file_iterator('example_module.py'):\n self.assertFalse(line.startswith(' '))", "def dealCommonline(line):\n\n print \"\\t\\t%s\" % (line.strip('\\n'))", "def FilterLine(self, a_line):\n return a_line", "def is_blank(line):\n\treturn not bool(line.strip())", "def clean(self, line):\n m = self.RE.match(line)\n if line.strip() == \">\":\n return \"\"\n elif m:\n return m.group(2)\n else:\n return line", "def strip_warnings(self, line):\n if line[0] == \"|\":\n return \"\"\n else:\n return line", "def clean(self, line):\r\n m = self.RE.match(line)\r\n if line.strip() == \">\":\r\n return \"\"\r\n elif m:\r\n return m.group(2)\r\n else:\r\n return line", "def write_stripped_line(fout, line):\n fout.write(line)\n fout.write('\\n')" ]
[ "0.7250562", "0.71768016", "0.7084123", "0.6787665", "0.6771485", "0.67688704", "0.6600255", "0.65247554", "0.65227586", "0.6483088", "0.64529765", "0.6417862", "0.6400912", "0.6377311", "0.6369888", "0.6362237", "0.6348878", "0.6332042", "0.62773633", "0.6241648", "0.6229595", "0.62176037", "0.61825746", "0.61601543", "0.6160119", "0.61259323", "0.61087614", "0.6088505", "0.6072698", "0.6060406" ]
0.7533331
0
Test concatenating different lines
def test_line_concat(): for _x in range(100): strings = [random_str(30, 50) for _x in range(10)] l_file = random_str(10, 20) l_num = randint(1, 10000) lines = [Line(x, l_file, l_num) for x in strings] # Concatenate the lines l_full = lines[0] for line in lines[1:]: l_full = l_full + line # Test the result assert l_full == "".join(strings) assert isinstance(l_full, Line) assert l_full.file == l_file assert l_full.number == l_num
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_with_2_lines():\n line = \"n\" * 15 + \"\\n\" + \"n\" * 60 + \" \" + \"n\" * 10\n assert wrap_line(line) == \"n\" * 15 + \" \" + \"n\" * 60 + \"\\n\" + \"n\" * 10", "def test_writer_linebreak():\n GCMT(write=\"on\")\n write_message(100 * \"test\")\n write_message(100 * \" \")", "def test_str_magic_method():\n LINES = (\n \"One morn before me were three figures seen,\",\n \"And once more came they by:-alas! wherefore?\",\n )\n for line in LINES:\n assert(str(LineBuilder(line)) == line)", "def getMergeLine(desc_line,CC3_sample,GP2_sample):\n return desc_line.strip(\"\\n\") + \"\" + CC3_sample + \"\" + GP2_sample + \"\\n\"", "def concatenate(strings: List[str]) -> str:\n # Please print out which line of the above program contains an error. E.g. if the bug is on line 4 then print 4\n # END OF CONTEXT\n print(\"4\")\n # END OF SOLUTION", "def concatena(*args):\n linea = ''\n for l in args:\n linea += str(l if l else '')\n return linea", "def test_space_at_the_end():\n line = \"n\" * 79 + \" \"\n print \"--%s--\" % wrap_line(line)\n assert wrap_line(line) == \"n\" * 79", "def test_extend_to_line(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.3\", \"3.3\"),\n after_sel=(\"3.0\", \"3.10\"),\n command_name=\"extend-to-line\",\n )", "def test_text_line_mode(self):\n outfile = cStringIO.StringIO()\n var_order = [2,1,0]\n\n \n # Write out in the order 2, 1, 0. In a normal program those constants\n # would come from an enum indicating the order in which the fields\n # appear in schema\n aggregator = lra.LineRawHandleAggregator(outfile, var_order = var_order)\n\n aggregator.map({0: 'world', 1: 'there', 2: 'hello'})\n aggregator.map({0: 'good', 1: 'is', 2: 'this'})\n\n self.assertEqual(outfile.getvalue(),\n 'INSERT\\nhello\\nthere\\nworld\\nENDINSERT\\n'\n 'INSERT\\nthis\\nis\\ngood\\nENDINSERT\\n')", "def test_basic():\n line = \"test\"\n assert wrap_line(line) == \"test\"", "def writelines(self, seq: list[str]) -> None:\n ...", "def test_with_multiple_lines(self):\n self.assertEqual(indent('foo\\nbar'),\n ' foo\\n bar')", "def test_wrap_2_words():\n w1, w2 = \"n\" * 75, \"n\" * 5\n line = \"%s %s\" % (w1, w2)\n assert wrap_line(line) == \"%s\\n%s\" % (w1, w2)", "def test_string_concat():\n tree = parse(dedent(\"\"\"\\\n import logging\n\n logging.info(\"Hello\" + \" \" + \"World!\")\n \"\"\"))\n visitor = LoggingVisitor()\n visitor.visit(tree)\n\n assert_that(visitor.violations, has_length(2))\n # NB: We could easily decide to report only one of these\n assert_that(visitor.violations[0][1], is_(equal_to(STRING_CONCAT_VIOLATION)))\n assert_that(visitor.violations[1][1], is_(equal_to(STRING_CONCAT_VIOLATION)))", "def test_write_qual_line_long_seq(self):\r\n\r\n demultiplexed_qual_f = FakeOutFile()\r\n qual_seq = [25, 24, 22, 24, 24, 24, 25, 30, 23, 22, 22, 24, 25,\r\n 14, 25, 27, 29, 30, 14, 10, 1, 23, 24, 27, 28, 30, 22, 24, 21,\r\n 24, 22, 21, 15, 17, 17, 15, 22, 13, 11, 10, 22, 24, 27, 28, 30,\r\n 14, 25, 27, 29, 30, 14, 10, 1, 23, 24, 27, 28, 30, 22, 24, 21,\r\n 14, 25, 27, 29, 30, 14, 10, 1, 23, 24, 27, 28, 30, 22, 24, 21]\r\n\r\n label_line = \"sample3_1 ABCD1234\"\r\n keep_barcode = False\r\n bc_len = 4\r\n write_qual_line(demultiplexed_qual_f, qual_seq, label_line,\r\n keep_barcode, bc_len)\r\n\r\n expected_data = '>sample3_1 ABCD1234\\n24 24 25 30 23 22 22 24 25 14 25 
27 29 30 14 10 1 23 24 27 28 30 22 24 21 24 22 21 15 17 17 15 22 13 11 10 22 24 27 28 30 14 25 27 29 30 14 10 1 23 24 27 28 30 22 24 21 14 25 27\\n29 30 14 10 1 23 24 27 28 30 22 24 21\\n'\r\n self.assertEqual(demultiplexed_qual_f.data, expected_data)", "def test_basic_end(self):\n self.assertLines(\n [\"-E\", \"2\", \"examples/dummy4.csv\"], [\"a,b,c\", \"1,2,3\", \"4,5,6\",]\n )", "def test_write_fasta_line(self):\r\n\r\n demultiplexed_seqs_f = FakeOutFile()\r\n fasta_seq = \"ACTAGACCTACAGGATACCATAGGACCAGATTTACA\"\r\n label_line = \"Sample1_213 ABCD1234\"\r\n keep_barcode = False\r\n bc_len = 4\r\n write_fasta_line(demultiplexed_seqs_f, fasta_seq, label_line,\r\n keep_barcode, bc_len)\r\n\r\n expected_data = \">Sample1_213 ABCD1234\\nGACCTACAGGATACCATAGGACCAGATTTACA\\n\"\r\n self.assertEqual(demultiplexed_seqs_f.data, expected_data)", "def test_add_space_to_lines(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.0\", \"4.6\"),\n after_sel=(\"2.0\", \"4.7\"),\n command_name=\"add-space-to-lines\",\n )", "def test_newlines(self):\n self.assertValue({\n \"foo\": \"something\\nwith\\nnewlines\",\n },\n \"foo: something_with_newlines\\n\")", "def test_LogicalLines(self) -> None:\n content = \"\"\"\nfoo \\\\\nbar \\\\\nbaz\nfoo\nbling \\\\\nbling \\\\ bling\nbling\n\"\"\"\n fobj = io.StringIO(content)\n lines = LogicalLines(fobj).readlines()\n assert lines == [\n '\\n',\n 'foo bar baz\\n',\n 'foo\\n',\n 'bling bling \\\\ bling\\n',\n 'bling\\n',\n ], lines", "def testConcatSourceMultipleButOneConcatable(self):\n env = self.env\n\n # Even if multiple input files, if only one is concat-able, won't concat.\n cs = env.ConcatSource('foo3.cc', ['a.cc', 'd.o'])\n self.assertEqual(map(str, cs), ['d.o', 'a.cc'])", "def test_insert_newline(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first li\n ne\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.8\", \"1.8\"),\n after_sel=(\"2.0\", \"2.0\"),\n command_name=\"insert-newline\",\n )", "def test_finish_of_line(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a \n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a \n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.12\", \"3.12\"),\n after_sel=(\"3.9\", \"3.9\"),\n command_name=\"finish-of-line\",\n )", "def test_end_of_line(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"1.0\"),\n after_sel=(\"1.10\", \"1.10\"),\n command_name=\"end-of-line\",\n )", "def test_finish_of_line_2(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a \n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a \n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.1\", \"3.1\"),\n after_sel=(\"3.9\", \"3.9\"),\n command_name=\"finish-of-line\",\n )", "def test_end_of_line_internal_blank_line(self):\n before_b = \"\"\"\\\n first line\n\n line 1\n line a\n 
line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.0\", \"2.0\"),\n after_sel=(\"2.0\", \"2.0\"),\n command_name=\"end-of-line\",\n )", "def test_write_qual_line_short_seq(self):\r\n\r\n demultiplexed_qual_f = FakeOutFile()\r\n qual_seq = [25, 24, 22, 24, 24, 24, 25, 30, 23, 22, 22, 24, 25]\r\n label_line = \"sample3_1 ABCD1234\"\r\n keep_barcode = False\r\n bc_len = 4\r\n write_qual_line(demultiplexed_qual_f, qual_seq, label_line,\r\n keep_barcode, bc_len)\r\n\r\n expected_data = '>sample3_1 ABCD1234\\n24 24 25 30 23 22 22 24 25\\n'\r\n\r\n self.assertEqual(demultiplexed_qual_f.data, expected_data)", "def dealCommonline(line):\n\n print \"\\t\\t%s\" % (line.strip('\\n'))", "def other_lines(line):\r\n res = \"\"\r\n for j, i in enumerate(line):\r\n res += i\r\n if j != len(line) - 1:\r\n res += '|'\r\n print(res)", "def test_end_of_line_single_char_last_line(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last non-blank line\n \n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last non-blank line\n \n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"7.0\", \"7.0\"),\n after_sel=(\"7.1\", \"7.1\"),\n command_name=\"end-of-line\",\n )" ]
[ "0.67595047", "0.6424525", "0.6362578", "0.6216905", "0.61788297", "0.61623013", "0.61015594", "0.60790586", "0.6044751", "0.60344905", "0.6002795", "0.5997416", "0.5959322", "0.59512234", "0.5943323", "0.5936905", "0.5928723", "0.5908358", "0.5903566", "0.58914983", "0.5878602", "0.5858833", "0.5799758", "0.5789917", "0.5775693", "0.5759649", "0.5758741", "0.5740962", "0.5732776", "0.5728489" ]
0.7924718
0
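A minimal sketch of the `Line` class this test appears to assume: a `str` subclass carrying `file` and `number` metadata, with `+` keeping the left operand's origin (the real class may differ):

class Line(str):
    """A string that remembers which file and line number it came from."""

    def __new__(cls, text, file, number):
        obj = super().__new__(cls, text)
        obj.file = file
        obj.number = number
        return obj

    def __add__(self, other):
        # Concatenation keeps the file/number of the left-hand operand,
        # matching the assertions in test_line_concat above.
        return Line(str(self) + str(other), self.file, self.number)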
Run code quality check
def codeqa(): try: sh('flake8 h5_validator') except BuildFailure: pep8_fail = True else: pep8_fail = False try: sh("pydocstyle h5_validator") except BuildFailure: docstring_fail = True else: docstring_fail = False if pep8_fail or docstring_fail: raise BuildFailure('Code Quality checks failed')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n coverage = calculate_code_coverage()\n platform = os.uname()[0]\n if coverage < CODE_COVERAGE_GOAL[platform]:\n data = {\n 'expected': CODE_COVERAGE_GOAL[platform],\n 'observed': coverage,\n }\n print '\\033[91mFAIL: %(observed).2f%% does not meet goal of %(expected).2f%%\\033[0m' % data\n sys.exit(1)", "def test_codeanalysis():\n code = open(TEST_FILE).read()\n check_results = check_with_pyflakes(code, TEST_FILE) + \\\n check_with_pep8(code, TEST_FILE) + find_tasks(code)\n\n assert 85 <= len(check_results) <= 99", "def test_pep8_conformance_unitests(self):\n\n print(\"\\r\\n\")\n\n # Get the path to current directory\n path = os.path.dirname(os.path.realpath(__file__))\n\n self.run_check(path)", "def check():\n \n overall_report = dict()\n\n # source code analysis\n # ====================\n # currently empty\n \n # compile\n # =======\n ret_makefile = subprocess.run([config.compiler] + config.compiler_args, # command\n stdout=subprocess.PIPE, # capture stdout\n stderr=subprocess.PIPE, # capture stderr\n universal_newlines=True) # use text mode for std* file objects\n overall_report['makefile'] = ret_makefile\n \n # runtime analysis\n # ================\n with open('compile.txt', 'r') as f:\n if 'error' not in f.read().lower(): # if compilation succeeded\n overall_report, test_case_report_list = runtime_analysis(config, overall_report)\n \n # pass this info to next tools for subsequent processing\n # ======================================================\n pp(overall_report)\n # results from runtime analysis\n if 'runtime_analysis_done' in overall_report:\n success_count = 0\n for report in test_case_report_list:\n if 'timeout' in report:\n util.addFinding(\"Time limit exceeded!\", 0, \"\", \"TEST_080006\")\n elif report['return_code'] != 0:\n if report['stderr_stream'] != '': # ASan/LeakSan/Stack protector probably reported something\n pass # but these findings will be added by analyze.py\n else:\n util.addFinding(\"It seems your program might have crashed.\", 0,\"\",\"TEST_100006\")\n # output_match == None means the user might have tried to print to outfile\n elif report['stdout_stream'] != '' or report['output_match'] is None:\n util.addFinding(\"A test case failed! 
Make sure you are not trying to print something.\",\n 0,\"\",\"TEST_100006\")\n elif not all(report['output_match']): # not all test cases passed\n util.addFinding(\"A test case failed!\", 0, \"\", \"TEST_100006\")\n else:\n success_count += 1\n\n with open('stderr.txt', 'a') as f:\n f.write(report['stderr_stream'])\n with open('stdout.txt', 'a') as f:\n f.write(report['outfile'])\n\n if success_count == len(test_case_report_list):\n util.addFinding(\"Program behaves as expected!\", 1, \"CHALLENGE_PASS\", \"TEST_900006\")\n \n util.dumpFindings()\n \n # next tools\n subprocess.run([\"./analyse.py\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n subprocess.run([\"./ai.py\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)", "def main():\n logging.info(\"Executing data quality module\")\n\n calculate_quality()", "def run(self):\n cmd = 'coverage run setup.py test && coverage report -m'\n check_call(cmd, shell=True)", "def is_code_good(safe_from_bugs, ready_for_change, easy_to_understand):\n pass # your code here!", "def test_conformance_tests(self):\n style = pycodestyle.StyleGuide(quiet=True)\n result = style.check_files(['tests'])\n self.assertEqual(result.total_errors, 0,\n \"Found code style errors (and warnings).\")", "def check():", "def main():\n parser = argparse.ArgumentParser(description=\"Analyze requirement coverage\")\n parser.add_argument(\n \"project_info_path\",\n help=\"JSON file containing project information\",\n type=Path,\n )\n parser.add_argument(\n \"test_result_path\",\n help=\"XML file containing test result\",\n type=Path,\n )\n parser.add_argument(\n \"requirements_path\",\n help=\"CSV file containing requirements\",\n type=Path,\n )\n\n args = parser.parse_args()\n\n ok = analyze(args.project_info_path, args.test_result_path, args.requirements_path)\n if not ok:\n exit(1)\n else:\n exit(0)", "def check(self, runtime):", "def run(self):\n\n print('Quality script: ' + self.script)\n print('Report file: ' + self.report)\n print('Base dir: ' + self.baseDir)\n\n cont = raw_input('Are these values correct? 
' + \\\n 'Press \"A\" to abbort or any other key to proceed ')\n if cont == 'A':\n sys.exit(0)\n\n for packageDir in self.packages.keys():\n localPath = os.path.join(self.baseDir, packageDir)\n # execute the quality script which produces a codeQuality.txt file\n command = self.script + ' ' + localPath\n result = getstatusoutput(command)\n for entry in result:\n print(str(entry))\n # parse the code quality file for the rating:\n reportFile = open(self.report, 'r')\n repNl = reportFile.readline()\n while repNl:\n if repNl.find('Your code has been rated at') == 0:\n relRating = repNl.split(' ')[6]\n absRating = float(relRating.split('/')[0])\n if absRating < self.threshold:\n fileRating = (str(absRating), packageDir)\n authors = self.packages[packageDir]\n if authors not in self.lowQuality:\n self.lowQuality[self.packages[packageDir]] = []\n # add the low rating\n self.lowQuality[authors].append(fileRating)\n break\n repNl = reportFile.readline()\n reportFile.close()", "def test_conformance_tests_test_heroku(self):\n style = pycodestyle.StyleGuide(quiet=True)\n result = style.check_files(['tests/test_heroku.py'])\n self.assertEqual(result.total_errors, 0,\n \"Found code style errors (and warnings).\")", "def run_quality():\r\n\r\n # Directory to put the diff reports in.\r\n # This makes the folder if it doesn't already exist.\r\n dquality_dir = get_or_make_dir(os.path.join(Env.REPORT_DIR, \"diff_quality\"))\r\n\r\n # Generage diff-quality html report for pep8, and print to console\r\n # If pep8 reports exist, use those\r\n # Otherwise, `diff-quality` will call pep8 itself\r\n\r\n pep8_files = []\r\n for subdir, _dirs, files in os.walk(os.path.join(Env.REPORT_DIR)):\r\n for f in files:\r\n if f == \"pep8.report\":\r\n pep8_files.append(os.path.join(subdir, f))\r\n\r\n pep8_reports = u' '.join(pep8_files)\r\n\r\n sh(\r\n \"diff-quality --violations=pep8 --html-report {dquality_dir}/\"\r\n \"diff_quality_pep8.html {pep8_reports}\".format(\r\n dquality_dir=dquality_dir, pep8_reports=pep8_reports)\r\n )\r\n\r\n sh(\r\n \"diff-quality --violations=pep8 {pep8_reports}\".format(\r\n pep8_reports=pep8_reports)\r\n )\r\n\r\n # Generage diff-quality html report for pylint, and print to console\r\n # If pylint reports exist, use those\r\n # Otherwise, `diff-quality` will call pylint itself\r\n\r\n pylint_files = []\r\n for subdir, _dirs, files in os.walk(os.path.join(Env.REPORT_DIR)):\r\n for f in files:\r\n if f == \"pylint.report\":\r\n pylint_files.append(os.path.join(subdir, f))\r\n\r\n pylint_reports = u' '.join(pylint_files)\r\n\r\n pythonpath_prefix = (\r\n \"PYTHONPATH=$PYTHONPATH:lms:lms/djangoapps:lms/lib:cms:cms/djangoapps:cms/lib:\"\r\n \"common:common/djangoapps:common/lib\"\r\n )\r\n\r\n sh(\r\n \"{pythonpath_prefix} diff-quality --violations=pylint --html-report \"\r\n \"{dquality_dir}/diff_quality_pylint.html {pylint_reports}\".format(\r\n pythonpath_prefix=pythonpath_prefix,\r\n dquality_dir=dquality_dir,\r\n pylint_reports=pylint_reports\r\n )\r\n )\r\n\r\n sh(\r\n \"{pythonpath_prefix} diff-quality --violations=pylint {pylint_reports}\".format(\r\n pythonpath_prefix=pythonpath_prefix,\r\n pylint_reports=pylint_reports\r\n )\r\n )", "def test_load_quality_codes():\n assert len(code_reader.load_quality_codes()) > 0", "def test_check(self):\n\n self.assertTrue(Naive().check(self.file_gitignore))\n self.assertTrue(Naive().check(self.file_tests))\n self.assertTrue(Naive().check(self.file_bin))\n self.assertTrue(Naive().check(self.file_py))\n 
self.assertTrue(Naive().check(self.file_authors))", "def test_conformance_tests_test_output(self):\n style = pycodestyle.StyleGuide(quiet=True)\n result = style.check_files(['tests/test_output.py'])\n self.assertEqual(result.total_errors, 0,\n \"Found code style errors (and warnings).\")", "def test_pylint_score_main_script(self):\n my_dir = pathlib.Path(__file__).resolve().parent\n root_dir = my_dir.parent.parent.parent\n pylintrc = root_dir / \".pylintrc\"\n script = root_dir / \"backend\" / \"quality_report.py\"\n self.assert_pylint_score(\"{0} --rcfile {1}\".format(script, pylintrc), 10.0)", "def cc():\n load_env_vars('dev')\n from tools.static_code_analysis import CyclomaticComplexity\n radon_cc = CyclomaticComplexity()\n score = radon_cc.run_test()\n radon_cc.create_badge(score)", "def run_check(self, ctx: RunContext):\n params = ctx.get_params(\"mccabe\")\n options = ctx.options\n if options:\n params.setdefault(\"max-complexity\", options.max_complexity)\n\n McCabeChecker.max_complexity = int(params.get(\"max-complexity\", 10))\n McCabeChecker._error_tmpl = \"%r is too complex (%d)\"\n number = McCabeChecker._code\n for lineno, offset, text, _ in McCabeChecker(ctx.ast, ctx.filename).run():\n ctx.push(\n col=offset + 1,\n lnum=lineno,\n number=number,\n text=text,\n type=\"C\",\n source=\"mccabe\",\n )", "def test_run(self):\n rig_analysis_dir = \"rig_analysis\"\n analysis_root = os.path.join(self.io_args.output_root, rig_analysis_dir)\n os.makedirs(analysis_root, exist_ok=True)\n\n self.io_args.output_obj = os.path.join(analysis_root, \"final.obj\")\n self.io_args.output_equirect = os.path.join(analysis_root, \"equirect.ppm\")\n self.io_args.output_camera = os.path.join(analysis_root, \"camera.ppm\")\n self.io_args.output_camera_id = \"0\"\n self.io_args.output_cross_section = os.path.join(analysis_root, \"cross.ppm\")\n\n self.run_app(\"RigAnalyzer\")\n self.check_against_truth(\n truth=os.path.join(self.io_args.truth_dir, rig_analysis_dir),\n output=analysis_root,\n )", "def test_conformance_tests_test_videos(self):\n style = pycodestyle.StyleGuide(quiet=True)\n result = style.check_files(['tests/test_videos.py'])\n self.assertEqual(result.total_errors, 0,\n \"Found code style errors (and warnings).\")", "def run_check(self, path):\n\n result = pycodestyle.StyleGuide().check_files(paths=[path])\n\n if result.total_errors != 0:\n self.assertEqual(\n result.total_errors, 0,\n \"Found code style errors (and warnings).\")", "def test_run_coverage(self):\n cmd = GreenTestCommand(Distribution())\n cmd.coverage = True\n cmd.ensure_finalized()\n cmd.run()\n self.assertThat(_subprocess_call_args(), Contains(\"-r\"))", "def test(coverage):\n print('success')\n pass", "def release_qa():\n lines = StringIO.StringIO(local('find . 
-name \"*.py\"', capture=True))\n for line in lines.readlines():\n print \"PYLINT CHECK\"\n print \"-----------------------\"\n pyfile = os.path.normpath(line).replace(\"\\n\",\"\").replace(\"\\r\",\"\")\n \n reportfilename = pyfile.replace(\"./\", \"\").replace(\"/\", \"_\").replace(\".py\", \".txt\")\n reportpath = os.path.join(\"qa\", \"pylint\", reportfilename)\n\n options = {\"pyfile\":pyfile, \"reportpath\": reportpath}\n command = \"pylint %(pyfile)s > %(reportpath)s\" % options \n _subexec(command) \n\n print \"PEP8 CHECK\"\n print \"-----------------------\"\n reportpath = os.path.join(\"qa\", \"pep8\", reportfilename)\n options['reportpath'] = reportpath\n command = \"pep8 %(pyfile)s > %(reportpath)s\" % options\n _subexec(command)", "def run_and_check(self, *args, **kwargs) -> None:\n raise NotImplementedError", "def _run_ci_test():\n _run_install(False)\n _run_coverage_html(False)\n _run_typecheck_xml(False)\n _run_lint(True)", "def qa_test():\r\n # Reads Code and Runs Code Metrics\r\n with open(\"BrainDataVisualiser.py\",\"r\") as file:\r\n code = file.read()\r\n with open(\"QA_LOGS.txt\",\"a\") as file:\r\n # Timestamp and append metric results to log\r\n file.write(datetime.date.today().strftime(\"%b-%d-%Y\")+\"\\n\\t\")\r\n file.write(\"General Analysis\\n\\t\\t\")\r\n file.write(str(analyze(code))+\"\\n\\t\")\r\n file.write(\"Cyclomatic Complexity\\n\")\r\n for i in cc_visit(code):\r\n file.write(\"\\t\\t\"+cc_rank(i.complexity)+\" \"+str(i)+\"\\n\")", "def main():\n # run_test_go_straight_inches()\n # run_test_turn_degrees()\n # run_test_spin_degrees()\n beep_if_blob_is_bigger_than(3000)" ]
[ "0.690629", "0.68706906", "0.6722863", "0.65503937", "0.65005994", "0.6477863", "0.63772184", "0.634831", "0.6311739", "0.62938887", "0.62822163", "0.6276711", "0.622485", "0.6216071", "0.61776257", "0.61024976", "0.6045385", "0.6026006", "0.60110664", "0.6001104", "0.5978099", "0.59649086", "0.59622055", "0.5958773", "0.59572273", "0.59536695", "0.5942509", "0.5934273", "0.59210837", "0.59180886" ]
0.7629032
0
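The `codeqa` task above relies on paver's `sh` helper and `BuildFailure`; a rough standard-library equivalent (package name taken from the task, exit handling assumed) is:

import subprocess
import sys

def codeqa(package="h5_validator"):
    """Run flake8 and pydocstyle on the package; fail if either reports issues."""
    failed = False
    for tool in ("flake8", "pydocstyle"):
        # subprocess.run returns a CompletedProcess; a non-zero returncode
        # means the tool reported violations.
        failed = subprocess.run([tool, package]).returncode != 0 or failed
    if failed:
        sys.exit("Code Quality checks failed")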
Build the Sphinx documentation
def docs(): sh('sphinx-build -W -b html docs docs/_build/html')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def buildDocumentation():\n helptext = 'usage: build_doc.py <output format> <type of documentation>' \\\n '\\n - html: for html output' \\\n '\\n - pdf: for pdf output' \\\n '\\n\\n - all: complete documentation' \\\n '\\n - dev: only developer documentation' \\\n '\\n - user: only user documentation'\n if len(sys.argv) != 3:\n print helptext\n sys.exit(1)\n\n if sys.argv[1] not in ['pdf', 'html']:\n print helptext\n sys.exit(1)\n if sys.argv[2] not in ['all', 'dev', 'user']:\n print helptext\n sys.exit(1)\n\n copyfile('docs/index_%s.rst.template' % sys.argv[2], 'index.rst') # copy main file into root directory\n os.system('sphinx-build -b %s -c docs -D master_doc=index . docs/output/%s/%s' % (sys.argv[1], sys.argv[1], sys.argv[2]))\n os.remove('index.rst') # delete config file from root directory", "def build_docs(session):\n envbindir = session.bin\n session.install(\"-e\", \".[all,docs]\")\n with session.chdir(\"docs/\"):\n session.run(\n \"sphinx-autobuild\",\n \"-j\",\n \"auto\",\n \"--open-browser\",\n \"-qT\",\n \".\",\n f\"{envbindir}/../tmp/html\",\n )", "def run():\n build_no_documentation()\n build_sphinx_build()\n #build_sphinx_pdf()\n build_graphviz_files()", "def build_docs(source, destination, doctrees):\n sphinx_argv = [\n '-b', 'html',\n '-d', doctrees,\n source,\n destination]\n\n sphinx_main(['sphinx-build'] + sphinx_argv)", "def generate_documentation(self):\n self.generate_api_docs()\n build.main([\n self.SOURCE_DIR,\n self.BUILD_DIR,\n ])", "def docs(session):\n session.install('-rrequirements-dev.txt')\n session.install('-e', '.')\n run_sphinx(session)", "def doc(self):\n from distutils.dir_util import copy_tree\n\n def copy_tree_checker(src, dst):\n \"\"\"Wrap copy_tree to avoid pydoit error.\"\"\"\n copy_tree(src, dst)\n return True\n\n return {\n \"actions\": [\n (create_dir, [\"build/doc/source\"]),\n (copy_tree_checker, [\"docs\", \"build/doc/source\"]),\n TaskCreator.get_sphinx() + \"-apidoc -o build/doc/source --force --separate --module-first \" + self.project_name_sc,\n TaskCreator.get_sphinx() + \"-build -j auto -n build/doc/source build/doc/html\"\n ],\n \"verbosity\": 2\n }", "def _sphinx_build(self, kind: str):\n if kind not in (\"html\", \"latex\"):\n raise ValueError(f\"kind must be html or latex, not {kind}\")\n\n cmd = [\"sphinx-build\", \"-b\", kind]\n if self.num_jobs:\n cmd += [\"-j\", self.num_jobs]\n if self.warnings_are_errors:\n cmd += [\"-W\", \"--keep-going\"]\n if self.verbosity:\n cmd.append(f\"-{'v' * self.verbosity}\")\n cmd += [\n \"-d\",\n os.path.join(BUILD_PATH, \"doctrees\"),\n SOURCE_PATH,\n os.path.join(BUILD_PATH, kind),\n ]\n return subprocess.call(cmd)", "def _sphinx_build(self, kind: str):\n if kind not in (\"html\", \"latex\"):\n raise ValueError(f\"kind must be html or latex, not {kind}\")\n\n cmd = [\"sphinx-build\", \"-b\", kind]\n if self.num_jobs:\n cmd += [\"-j\", self.num_jobs]\n if self.warnings_are_errors:\n cmd += [\"-W\", \"--keep-going\"]\n if self.verbosity:\n cmd.append(f\"-{'v' * self.verbosity}\")\n cmd += [\n \"-d\",\n os.path.join(BUILD_PATH, \"doctrees\"),\n SOURCE_PATH,\n os.path.join(BUILD_PATH, kind),\n ]\n return subprocess.call(cmd)", "def docs(command, warn_is_error=False, options=\"\"):\n print(\n \"\"\"\nRunning Sphinx to test the docs building\n========================================\n\"\"\"\n )\n o = \"-W \" if warn_is_error else \"\"\n if \"-W\" in options:\n options = options.replace(\"-W\", \"\")\n options = options + \" \" + o\n shutil.rmtree(\"docs/_build\", ignore_errors=True)\n 
shutil.rmtree(\"docs/api\", ignore_errors=True)\n shutil.rmtree(\"docs/code_reference/api\", ignore_errors=True)\n shutil.rmtree(\"docs/jupyter_execute\", ignore_errors=True)\n shutil.rmtree(\"docs/examples/default_config.yaml\", ignore_errors=True)\n command.run(\"python -m boa.config --output-path docs/examples/default_config.yaml\", echo=True, pty=POSIX)\n command.run(f\"sphinx-build {options} -b html docs docs/_build\", echo=True, pty=POSIX)", "def build_docs(open_docs):\n python_call(\"pip\", [\"install\", \"src/[docs]\"])\n python_call(\"pip\", [\"install\", \"-r\", \"src/requirements.txt\"])\n python_call(\n \"ipykernel\", [\"install\", \"--user\", \"--name=za_covid_map\"]\n )\n shutil.rmtree(\"docs/build\", ignore_errors=True)\n call(\n [\n \"sphinx-apidoc\",\n \"--module-first\",\n \"-o\",\n \"docs/source\",\n \"src/za_covid_map\",\n ]\n )\n call([\"sphinx-build\", \"-M\", \"html\", \"docs/source\", \"docs/build\", \"-a\"])\n if open_docs:\n docs_page = (Path.cwd() / \"docs\" / \"build\" / \"html\" / \"index.html\").as_uri()\n secho(\"Opening {}\".format(docs_page))\n webbrowser.open(docs_page)", "def task_sphinx():\n return Task(\n file_dep=[CONF], actions=[(needs, [\"sphinx\"])], uptodate=[not CONF.exists()]\n )", "def test_build(self):\n self.createFakeSphinxProject()\n self.builder.build(self.sphinxDir)\n self.verifyBuilt()", "def deploy_sphinx_docs():\n require('docs_root', 'docs_install_dir')\n sphinx.build_html_docs(env.docs_root)\n sudo('mkdir -p {}'.format(env.docs_install_dir))\n sphinx.deploy_html_docs(env.docs_root,\n env.docs_install_dir)", "def build_docs(options):\r\n verbose = getattr(options, 'verbose', False)\r\n\r\n cmd = \"cd {dir}; make html quiet={quiet}\".format(\r\n dir=doc_path(options),\r\n quiet=\"false\" if verbose else \"true\"\r\n )\r\n\r\n sh(cmd)", "def main():\n # We know that qidoc build will set the correct cwd\n qibuild_dir = \"..\"\n qibuild_dir = os.path.abspath(qibuild_dir)\n this_file = __file__\n this_dir = os.path.dirname(this_file)\n cmake_api = os.path.join(this_dir, \"../source/advanced/cmake/api\")\n cmake_api = os.path.abspath(cmake_api)\n if not os.path.exists(cmake_api):\n os.makedirs(cmake_api)\n qibuild_cmake = os.path.join(qibuild_dir, \"cmake\", \"qibuild\")\n for filename in DOCUMENTED_FILES:\n cmake_file = os.path.join(qibuild_cmake, filename + \".cmake\")\n rst_file = os.path.join(cmake_api, filename + \".rst\")\n gen_cmake_doc(cmake_file, rst_file)", "def test_html_documentation(self):\n app = Sphinx(\n self.source_dir,\n self.config_dir,\n self.output_dir,\n self.doctree_dir,\n buildername='html',\n warningiserror=True,\n )\n app.build(force_all=self.all_files)", "def build_docs(branch):\n os.chdir(os.path.join(gitdname, 'docs'))\n sphinx_dir = os.path.join(virtual_dir,'bin')\n retcode = subprocess.call(\"make clean\", shell=True)\n if retcode != 0:\n os.chdir(dname)\n msg = \"\"\"Could not clean the html docs for branch %s\"\"\" % branch\n raise Exception(msg)\n #NOTE: The python call in the below makes sure that it uses the Python\n # that is referenced after entering the virtualenv\n sphinx_call = \" \".join(['make','html',\n \"SPHINXBUILD=' python /usr/local/bin/sphinx-build'\"])\n activate = os.path.join(virtual_dir, \"bin\", \"activate\")\n activate_virtualenv = \". \" + activate\n #NOTE: You have to enter virtualenv in the same call. 
As soon as the\n # child process is done, the env variables from activate are lost.\n # getting the correct env from bin/activate and passing to env is\n # annoying\n retcode = subprocess.call(\" && \".join([activate_virtualenv, sphinx_call]),\n shell=True,\n env = {'MATPLOTLIBRC' : # put this in the environment to use local rc\n '/home/skipper/statsmodels/statsmodels/tools/',\n # Need this for my openblas setup on my laptop\n 'LD_LIBRARY_PATH' : os.getenv('LD_LIBRARY_PATH')})\n\n if retcode != 0:\n os.chdir(dname)\n msg = \"\"\"Could not build the html docs for branch %s\"\"\" % branch\n raise Exception(msg)\n os.chdir(dname)", "def run(self):\n name_desc = self.__class__.name_sphinx\n settings = self.state.document.settings\n env = settings.env if hasattr(settings, \"env\") else None\n docname = None if env is None else env.docname\n tag = self.options.get('tag', '').strip()\n n = self.__class__.node_class('')\n n[\"breftag\"] = tag\n n[\"brefsort\"] = self.options.get('sort', 'title').strip()\n n[\"brefsection\"] = self.options.get(\n 'section', True) in (True, \"True\", \"true\", 1, \"1\")\n n[\"brefcontents\"] = self.options.get(\n 'contents', False) in (True, \"True\", \"true\", 1, \"1\", \"\", None, \"None\")\n n['docname'] = docname\n if env is not None:\n targetid = 'index%slist-%s' % (name_desc,\n env.new_serialno('index%slist' % name_desc))\n targetnode = nodes.target('', '', ids=[targetid])\n return [targetnode, n]\n else:\n return [n]", "def sphinx(name, options='', dirname='sphinx-rootdir',\n theme='pyramid', automake_sphinx_options='',\n split=False):\n if name.endswith('.do.txt'):\n name = name.replace('.do.txt', '')\n\n if name.endswith('.do'):\n name = name.replace('.do','')\n\n # Compile source\n cmd = 'doconce format sphinx %(name)s %(options)s ' % vars()\n system(cmd)\n\n if split:\n cmd = 'doconce split_rst %(name)s' % vars()\n\n # Create sphinx directory\n cmd = 'doconce sphinx_dir theme=%(theme)s %(name)s' % vars()\n system(cmd)\n\n # Compile sphinx\n cmd = 'python automake_sphinx.py %(automake_sphinx_options)s' % vars()\n system(cmd)", "def run_sphinx(\n root_dir: Union[str, Path],\n job_count: int = 1,\n warnings_as_errors: bool = False,\n nitpicky: bool = False,\n) -> int:\n src_dir = str(os.path.abspath(root_dir))\n\n argv = [\n f\"-j {job_count}\",\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"_build\", \".doctrees\"),\n ]\n if warnings_as_errors:\n argv.append(\"-W\")\n if nitpicky:\n argv.append(\"-n\")\n argv.extend([src_dir, os.path.join(\"_build\", \"html\")])\n\n start_dir = os.path.abspath(\".\")\n try:\n os.chdir(src_dir)\n status = build_main(argv=argv)\n finally:\n os.chdir(start_dir)\n return status", "def _configure_sphinx(self):\n require('db_host')\n require('db_user')\n require('db_password')\n require('db_name')\n require('sphinx_counter')\n logger.info(\"Configure sphinx search daemon\")\n\n # Build /etc/sphinx.conf\n context = {\n 'database_user': env.db_user,\n 'database_password': env.db_password,\n 'database_name': env.db_name,\n 'database_host': env.db_host,\n 'counter': env.sphinx_counter,\n }\n with hide(*fab_output_hides):\n logger.info(\"Building /etc/sphinxsearch/sphinx.conf\")\n upload_template(\n 'sphinx/sphinx.conf',\n '/etc/sphinxsearch/sphinx.conf',\n context=context,\n use_jinja=True,\n template_dir=CONFIG_TPL_DIR,\n use_sudo=True,\n mode=0644,\n )\n\n script_destination = (\n '/var/lib/sphinxsearch/%s_indexer.sh' % env.db_name\n )\n with hide(*fab_output_hides):\n logger.info(\"Building %s\", script_destination)\n put(\n 
'../config/tpl/sphinx/policystat_indexer.sh',\n script_destination,\n mode=0755,\n use_sudo=True,\n )\n sudo('chown %s %s' % (F_CHOWN, script_destination))", "def docs_build(directory, site_name, view=True, assume_yes=False):\n context = toolkit.load_data_context_with_error_handling(directory)\n build_docs(context, site_name=site_name, view=view, assume_yes=assume_yes)\n toolkit.send_usage_message(\n data_context=context, event=\"cli.docs.build\", success=True\n )", "def sphinxify(docstring, context, buildername='html'):\n\n srcdir = mkdtemp()\n srcdir = encoding.to_unicode_from_fs(srcdir)\n\n base_name = osp.join(srcdir, 'docstring')\n rst_name = base_name + '.rst'\n\n if buildername == 'html':\n suffix = '.html'\n else:\n suffix = '.txt'\n output_name = base_name + suffix\n\n # This is needed so users can type \\\\ on latex eqnarray envs inside raw\n # docstrings\n if context['right_sphinx_version'] and context['math_on']:\n docstring = docstring.replace('\\\\\\\\', '\\\\\\\\\\\\\\\\')\n \n # Add a class to several characters on the argspec. This way we can\n # highlight them using css, in a similar way to what IPython does.\n argspec = context['argspec']\n for char in ['=', ',', '(', ')', '*', '**']:\n argspec = argspec.replace(char,\n '<span class=\"argspec-highlight\">' + char + '</span>')\n context['argspec'] = argspec\n\n doc_file = codecs.open(rst_name, 'w', encoding='utf-8')\n doc_file.write(docstring)\n doc_file.close()\n \n temp_confdir = False\n if temp_confdir:\n # TODO: This may be inefficient. Find a faster way to do it.\n confdir = mkdtemp()\n confdir = encoding.to_unicode_from_fs(confdir)\n generate_configuration(confdir)\n else:\n confdir = osp.join(get_module_source_path('spyderlib.utils.inspector'))\n\n confoverrides = {'html_context': context}\n\n doctreedir = osp.join(srcdir, 'doctrees')\n\n sphinx_app = Sphinx(srcdir, confdir, srcdir, doctreedir, buildername,\n confoverrides, status=None, warning=None,\n freshenv=True, warningiserror=False, tags=None)\n try:\n sphinx_app.build(None, [rst_name])\n except SystemMessage:\n output = _(\"It was not possible to generate rich text help for this \"\n \"object.</br>\"\n \"Please see it in plain text.\")\n return warning(output)\n\n # TODO: Investigate if this is necessary/important for us\n if osp.exists(output_name):\n output = codecs.open(output_name, 'r', encoding='utf-8').read()\n output = output.replace('<pre>', '<pre class=\"literal-block\">')\n else:\n output = _(\"It was not possible to generate rich text help for this \"\n \"object.</br>\"\n \"Please see it in plain text.\")\n return warning(output)\n\n if temp_confdir:\n shutil.rmtree(confdir, ignore_errors=True)\n shutil.rmtree(srcdir, ignore_errors=True)\n\n return output", "def sphinxify(docstring, context, buildername='html', img_path=''):\n if img_path:\n if os.name == 'nt':\n img_path = img_path.replace('\\\\', '/')\n leading = '/' if os.name.startswith('posix') else ''\n docstring = docstring.replace('_images', leading+img_path)\n\n srcdir = osp.join(DOCDIR, '_sources')\n if not osp.exists(srcdir):\n os.makedirs(srcdir)\n base_name = osp.join(srcdir, xrtQookPageName)\n rst_name = base_name + '.rst'\n\n # This is needed so users can type \\\\ on latex eqnarray envs inside raw\n # docstrings\n docstring = docstring.replace('\\\\\\\\', '\\\\\\\\\\\\\\\\')\n\n # Add a class to several characters on the argspec. 
This way we can\n # highlight them using css, in a similar way to what IPython does.\n # NOTE: Before doing this, we escape common html chars so that they\n # don't interfere with the rest of html present in the page\n argspec = escape(context['argspec'])\n for char in ['=', ',', '(', ')', '*', '**']:\n argspec = argspec.replace(\n char, '<span class=\"argspec-highlight\">' + char + '</span>')\n context['argspec'] = argspec\n\n doc_file = codecs.open(rst_name, 'w', encoding='utf-8')\n doc_file.write(docstring)\n doc_file.close()\n\n confoverrides = {'html_context': context,\n 'extensions': ['sphinx.ext.mathjax',\n 'sphinxcontrib.jquery']}\n\n doctreedir = osp.join(DOCDIR, 'doctrees')\n sphinx_app = Sphinx(srcdir, DOCDIR, DOCDIR, doctreedir, buildername,\n confoverrides, status=None, warning=None,\n freshenv=True, warningiserror=False, tags=None)\n\n try:\n sphinx_app.build(None, [rst_name])\n except SystemMessage:\n pass", "def createFakeSphinxProject(self):\n self.sourceDir.child(\"conf.py\").setContent(self.confContent.encode())\n self.sourceDir.child(\"index.rst\").setContent(self.indexContent.encode())", "def html():\n builtdocs = path(\"docs\") / options.sphinx.builddir / \"html\"\n destdir = path(PACKAGE) / \"docs\"\n destdir.rmtree()\n builtdocs.move(destdir)", "def _builder_inited(app: sphinx.application.Sphinx) -> None:\n _write_member_documentation_pages(\n _create_documenter(env=app.env,\n documenter_cls=sphinx.ext.autodoc.ModuleDocumenter,\n name='tensorstore'))", "def main(*, build, subdir, description, supports_modules=False,\n supports_quick=False):\n parser = argparse.ArgumentParser(description=description)\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\n \"--serve\", action='store_true',\n help=\"Serve the documentation on the given PORT for easy preview.\")\n group.add_argument(\n \"--out_dir\", type=str, metavar=\"DIR\",\n help=\"Generate the documentation to the given output directory.\"\n \" The DIR must be an absolute path.\"\n \" If DIR already exists, then it must be empty.\"\n \" (For regression testing, the DIR can be the magic value <test>,\"\n \" in which case a $TEST_TMPDIR subdir will be used.)\")\n parser.add_argument(\n \"--port\", type=int, metavar=\"PORT\", default=8000,\n help=\"Use a non-default PORT when serving for preview.\")\n parser.add_argument(\n \"--verbose\", action=\"store_true\",\n help=\"Echo detailed commands, progress, etc. to the console\")\n if supports_modules:\n parser.add_argument(\n \"module\", nargs=\"*\",\n help=\"Limit the generated documentation to only these modules and \"\n \"their children. When none are provided, all will be generated. \"\n \"For example, specify drake.math or drake/math for the C++ \"\n \"module, or pydrake.math or pydrake/math for the Python module.\")\n if supports_quick:\n parser.add_argument(\n \"--quick\", action=\"store_true\", default=False,\n help=\"Omit from the output items that are slow to generate. 
\"\n \"This yields a faster preview, but the output will be incomplete.\")\n args = parser.parse_args()\n if args.verbose:\n global _verbose\n _verbose = True\n curried_build = build\n if supports_modules:\n canonicalized_modules = [\n x.replace('/', '.')\n for x in args.module\n ]\n curried_build = functools.partial(\n curried_build, modules=canonicalized_modules)\n if supports_quick:\n curried_build = functools.partial(\n curried_build, quick=args.quick)\n if args.out_dir is None:\n assert args.serve\n _do_preview(build=curried_build, subdir=subdir, port=args.port)\n else:\n _do_generate(build=curried_build, out_dir=args.out_dir,\n on_error=parser.error)", "def build(\n ctx,\n skip,\n enable_doxygen_conf,\n enable_doxygen,\n enable_symlinks,\n enable_sphinx,\n use_doxygen_conf_in,\n doxygen_conf_defaults_path,\n dox,\n skip_dox,\n warning_is_error,\n nitpicky,\n):\n root_project_dir = discover_conf_py_directory(ctx.obj[\"root_project_dir\"])\n\n if doxygen_conf_defaults_path is not None:\n _doxygen_conf_defaults_path = Path(doxygen_conf_defaults_path)\n else:\n _doxygen_conf_defaults_path = None\n\n return_code = build_stack_docs(\n root_project_dir,\n skipped_names=skip,\n prefer_doxygen_conf_in=use_doxygen_conf_in,\n doxygen_conf_defaults_path=_doxygen_conf_defaults_path,\n enable_doxygen_conf=enable_doxygen_conf,\n enable_doxygen=enable_doxygen,\n enable_package_links=enable_symlinks,\n enable_sphinx=enable_sphinx,\n select_doxygen_packages=dox,\n skip_doxygen_packages=skip_dox,\n warning_is_error=warning_is_error,\n nitpicky=nitpicky,\n )\n if return_code > 0:\n sys.exit(return_code)" ]
[ "0.82856005", "0.8182243", "0.8095304", "0.7946425", "0.7832293", "0.7674444", "0.7616823", "0.75381774", "0.75381774", "0.740071", "0.73747015", "0.73051", "0.71761566", "0.71276677", "0.70834404", "0.7048519", "0.7046627", "0.7035853", "0.70308214", "0.7026935", "0.7019487", "0.7013685", "0.6975512", "0.69328773", "0.6920257", "0.6882421", "0.68726146", "0.6866374", "0.68600476", "0.6834218" ]
0.8379351
0
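On Sphinx >= 1.7 the same build can be driven without a shell, via `sphinx.cmd.build.build_main` (paths copied from the task above):

from sphinx.cmd.build import build_main

def docs():
    """Build the HTML docs; -W turns warnings into errors, as in the task above."""
    exit_code = build_main(["-W", "-b", "html", "docs", "docs/_build/html"])
    if exit_code != 0:
        raise RuntimeError("Sphinx build failed")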
Update an existing asset.
def update_asset(cls, id, asset_data): return ph_base._update_record('asset', id, asset_data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self) -> requests.request:\n # Check if id is set\n if self.args.id is None:\n raise Exception('Provide id of asset you want to update')\n\n # Check URL validity\n if self.args.url is not None and self.check_url_invalidity():\n raise Exception('Provided URL is not valid')\n\n # Send PUT request\n return requests.put(\n self.REQUEST_URL + str(self.args.id),\n {'title': self.args.title, 'label': self.args.label, 'url': self.args.url}\n )", "def update(self, instance, validated_data):\n instance.asset_name = validated_data.get('asset_name', instance.asset_name)\n instance.asset_type = validated_data.get('asset_type', instance.asset_type)\n instance.asset_class = validated_data.get('asset_class', instance.asset_class)\n instance.save()\n return instance", "async def on_event_asset_update(self, asset: Asset):\n self._assets = asset\n SingleTask.run(self._asset_update_callback, asset)", "async def on_event_asset_update(self, asset: Asset):\n self._assets = asset\n SingleTask.run(self._asset_update_callback, asset)", "def _update_asset(request, course_key, asset_key):\r\n if request.method == 'DELETE':\r\n # Make sure the item to delete actually exists.\r\n try:\r\n content = contentstore().find(asset_key)\r\n except NotFoundError:\r\n return JsonResponse(status=404)\r\n\r\n # ok, save the content into the trashcan\r\n contentstore('trashcan').save(content)\r\n\r\n # see if there is a thumbnail as well, if so move that as well\r\n if content.thumbnail_location is not None:\r\n # We are ignoring the value of the thumbnail_location-- we only care whether\r\n # or not a thumbnail has been stored, and we can now easily create the correct path.\r\n thumbnail_location = course_key.make_asset_key('thumbnail', asset_key.name)\r\n try:\r\n thumbnail_content = contentstore().find(thumbnail_location)\r\n contentstore('trashcan').save(thumbnail_content)\r\n # hard delete thumbnail from origin\r\n contentstore().delete(thumbnail_content.get_id())\r\n # remove from any caching\r\n del_cached_content(thumbnail_location)\r\n except:\r\n logging.warning('Could not delete thumbnail: %s', thumbnail_location)\r\n\r\n # delete the original\r\n contentstore().delete(content.get_id())\r\n # remove from cache\r\n del_cached_content(content.location)\r\n return JsonResponse()\r\n\r\n elif request.method in ('PUT', 'POST'):\r\n if 'file' in request.FILES:\r\n return _upload_asset(request, course_key)\r\n else:\r\n # Update existing asset\r\n try:\r\n modified_asset = json.loads(request.body)\r\n except ValueError:\r\n return HttpResponseBadRequest()\r\n contentstore().set_attr(asset_key, 'locked', modified_asset['locked'])\r\n # Delete the asset from the cache so we check the lock status the next time it is requested.\r\n del_cached_content(asset_key)\r\n return JsonResponse(modified_asset, status=201)", "def test_update_asset(self):\n pass", "def asset(self, asset):\n\n self._asset = asset", "def update(file: Path, validate_assets: bool, external_url: str) -> None:\n\n mutate(file, validate_assets, external_url, upsert=False)", "def update_asset(self, vid, asset, expiration, timestamp):\n return self \\\n .asset(vid) \\\n .is_asset_id(asset.asset_id) \\\n .choose(\n __.values('first_seen').is_(P.gt(timestamp)),\n __.property(Cardinality.single, 'first_seen', timestamp),\n __.identity(),\n ) \\\n .choose(\n __.values('last_seen').is_(P.lt(timestamp)),\n __.property(Cardinality.single, 'last_seen', timestamp)\n .property(Cardinality.single, 'expiration', expiration),\n __.identity(),\n ) \\\n .elementMap()", 
"def post_asset_update(lock, course):\r\n upload_date = datetime(2013, 6, 1, 10, 30, tzinfo=UTC)\r\n asset_location = course.id.make_asset_key('asset', 'sample_static.txt')\r\n url = reverse_course_url('assets_handler', course.id, kwargs={'asset_key_string': unicode(asset_location)})\r\n\r\n resp = self.client.post(\r\n url,\r\n json.dumps(assets._get_asset_json(\"sample_static.txt\", upload_date, asset_location, None, lock)),\r\n \"application/json\"\r\n )\r\n self.assertEqual(resp.status_code, 201)\r\n return json.loads(resp.content)", "def test_update_test_asset(self):\n pass", "def replace(self, filter, asset_dict): # client_dict provides the uuid\n mongo_core = MainDb.get_core_db_instance()\n replace_result = mongo_core.get_assets().find_one_and_replace(\n {\"uuid\": asset_dict[\"uuid\"]}, asset_dict, upsert=True, return_document=ReturnDocument.AFTER)\n if replace_result[\"uuid\"] == asset_dict[\"uuid\"]:\n return True, \"MongoAsset replaced\"\n else:\n return False, \"Failed to replace asset\"", "def update_resource(self, **kwargs):\n logging.warning('Updating a resource removes all existing data. '\n 'If you wish to keep the existing data, use `CachedCKAN.patch_resource`.')\n results = self.api.action.resource_update(**kwargs)\n self.get_ckan_metadata(True)\n if 'upload' in kwargs:\n resource_id = results['id'] if 'id' in results else kwargs['id']\n self._import_resource_to_cache(kwargs['upload'], resource_id)\n return results", "def _update_course_assets(self, user_id, asset_key, update_function):\n with self.bulk_operations(asset_key.course_key):\n original_structure = self._lookup_course(asset_key.course_key).structure\n index_entry = self._get_index_if_valid(asset_key.course_key)\n new_structure = self.version_structure(asset_key.course_key, original_structure, user_id)\n course_assets = new_structure.setdefault('assets', {})\n\n asset_type = asset_key.asset_type\n all_assets = SortedAssetList(iterable=course_assets.setdefault(asset_type, []))\n asset_idx = all_assets.find(asset_key)\n\n all_assets_updated = update_function(all_assets, asset_idx)\n new_structure['assets'][asset_type] = list(all_assets_updated)\n\n # update index if appropriate and structures\n self.update_structure(asset_key.course_key, new_structure)\n\n if index_entry is not None:\n # update the index entry if appropriate\n self._update_head(asset_key.course_key, index_entry, asset_key.branch, new_structure['_id'])", "def test_update_asset_content(self):\n pass", "def test_update(self):\n obj = self.provision_single_asset()\n test_string = \"testing this thing\"\n p = {'id': obj.id, 'description': test_string}\n self.put('widget', 200, params=p)\n self.session.refresh(obj)\n assert obj.description == test_string", "def test_save_asset_data():\n\n inventory_ = copy.deepcopy(self._inventory)\n\n asset = inventory_[\"assets\"][0]\n asset.update({\n \"key\": \"value\"\n })\n\n inventory.save(\n name=self._project[\"name\"],\n config=self._config,\n inventory=inventory_\n )\n\n asset = io.find_one({\"type\": \"asset\", \"name\": asset[\"name\"]})\n print(asset)\n assert_equals(asset[\"data\"][\"key\"], \"value\")", "def _update(self, **kwargs):\n\n resource_name = self._get_resource_name(**kwargs)\n config = misc_utils.resolve_config(\n kwargs.pop('config', None),\n kwargs.pop('config_file', None)\n )\n\n return self._make_request(\n uri='%s/%s' % (self._metadata['uri'], resource_name),\n method='PUT',\n config=config\n )", "def edit_custom_asset(self, custom_asset: CustomAsset) -> None:\n 
self._raise_if_custom_asset_exists(custom_asset)\n with GlobalDBHandler().conn.write_ctx() as write_cursor:\n write_cursor.execute(\n 'UPDATE assets SET name=? WHERE identifier=?',\n (custom_asset.name, custom_asset.identifier),\n )\n write_cursor.execute(\n 'UPDATE custom_assets SET notes=?, type=? WHERE identifier=?',\n (custom_asset.notes, custom_asset.custom_asset_type, custom_asset.identifier),\n )\n # this checks if the identifier exists in the db unlike `_raise_if_custom_asset_exists`\n # that checks for the existence of the combination of name & type.\n if write_cursor.rowcount == 0:\n raise InputError(\n f'Tried to edit custom asset with identifier {custom_asset.identifier} and name ' # noqa: E501\n f'{custom_asset.name} but it was not found',\n )", "def set_asset_metadata_attrs(self, asset_key, attr_dict, user_id): # lint-amnesty, pylint: disable=arguments-differ\n def _internal_method(all_assets, asset_idx):\n \"\"\"\n Update the found item\n \"\"\"\n if asset_idx is None:\n raise ItemNotFoundError(asset_key)\n\n # Form an AssetMetadata.\n mdata = AssetMetadata(asset_key, asset_key.path)\n mdata.from_storable(all_assets[asset_idx])\n mdata.update(attr_dict)\n\n # Generate a Mongo doc from the metadata and update the course asset info.\n all_assets.insert_or_update(mdata)\n return all_assets\n\n self._update_course_assets(user_id, asset_key, _internal_method)", "def addAsset(self, name, asset):\n self.__assets[name] = asset\n return True", "def test_update_system_asset(self):\n pass", "def update(self, resource, id, **data):\n self.request('/' + resource + '/' + str(id), 'PUT', body=urllib.urlencode(data))\n return True", "def test_update_asset_state(self):\n pass", "def test_update_software_asset(self):\n pass", "def update(self, request, pk=None):\n\n return Response({'http_method': 'PUT'})", "def upsert(file: Path, validate_assets: bool, external_url: str) -> None:\n\n mutate(file, validate_assets, external_url, upsert=True)", "def update(self, request, pk=None):\n\n return Response({'http_method':'PUT'})", "def test_update_test_asset_content(self):\n pass", "def update(self, request, pk=None):\n return Response({'http_method': 'PUT'})" ]
[ "0.69374233", "0.68586016", "0.67037857", "0.67037857", "0.6641486", "0.65909207", "0.653334", "0.63545084", "0.6239145", "0.61992556", "0.6113949", "0.60676587", "0.60544235", "0.6035436", "0.59977174", "0.59583884", "0.593529", "0.5906767", "0.5901882", "0.58826965", "0.57996327", "0.57375836", "0.57191026", "0.5694787", "0.5653156", "0.56380713", "0.5611146", "0.5606149", "0.5602093", "0.5596446" ]
0.81140244
0
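`ph_base._update_record` is not shown here; one plausible REST-style implementation (endpoint and payload shape are assumptions, not the real ph_base API) would be:

import requests

BASE_URL = "https://example.com/api"  # assumed, not the real service

def _update_record(record_type, record_id, data):
    """PUT the changed fields to /<type>/<id> and return the parsed reply."""
    response = requests.put(f"{BASE_URL}/{record_type}/{record_id}", json=data)
    response.raise_for_status()  # surface 4xx/5xx responses as exceptions
    return response.json()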
Checks if the number of images is equal to the number of labels in the path.
def _check_images_and_labels(self, image_dir, label_dir): return len(os.listdir(image_dir)) == len(os.listdir(label_dir))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def number_of_images_a_valid():\r\n counter = 0\r\n with os.scandir(os.path.join(dir_path, \"inputs\", \"type_a\")) as filepaths:\r\n for path in filepaths:\r\n extension = os.path.splitext(path)[1].lower()\r\n if extension == \".png\" or extension == \".jpg\":\r\n counter += 1\r\n if counter >= int(number_of_images_a.get()):\r\n return True\r\n else:\r\n messagebox.showwarning(\"Invalid Image Inputs\", (\r\n \"Not enough images of type a to create \"\r\n \"requested grid.\"))\r\n return False", "def number_of_images_b_valid():\r\n counter = 0\r\n with os.scandir(os.path.join(dir_path, \"inputs\", \"type_b\")) as filepaths:\r\n for path in filepaths:\r\n extension = os.path.splitext(path)[1].lower()\r\n if extension == \".png\" or extension == \".jpg\":\r\n counter += 1\r\n if ((number_of_images_b.get() == \"\") or\r\n (counter >= int(number_of_images_b.get()))):\r\n return True\r\n else:\r\n messagebox.showwarning(\"Invalid Image Inputs\", (\r\n \"Not enough images of type b to create \"\r\n \"requested grid.\"))\r\n return False", "def __len__(self):\n return len(self.img_paths)", "def __len__(self):\n return len(self.image_paths)", "def __len__(self):\n return len(self.image_paths)", "def __len__(self):\n return len(self.image_paths)", "def __len__(self):\n return len(self.imgs_path)", "def validate_labels(labels, path):\n for labels_ in labels.values():\n for label in labels_:\n for ann in label['annotations']:\n assert len(ann['segmentation']) == 1\n assert len(ann['segmentation'][0]) % 2 == 0\n\n label['annotations'] = [\n ann\n for ann in label['annotations']\n if len(ann['segmentation'][0]) >= 6\n ]\n assert len(label['annotations']) > 0\n label['file_name'] = path + '/' + label['file_name']\n\n for k in labels:\n labels[k] = [\n label for label in labels[k]\n if os.path.exists(label['file_name'])\n ]\n return labels", "def count_labels(labels_path):\n counts = np.zeros(4)\n with open(labels_path, 'r') as f:\n for line in f:\n line = int(line.split()[1]) - 1\n counts[line] += 1\n\n return counts", "def check_number_of_labels(n_labels, n_samples):\n if not 1 < n_labels < n_samples:\n raise ValueError(\"Number of labels is %d. Valid values are 2 \"\n \"to n_samples - 1 (inclusive)\" % n_labels)", "def check_number_of_labels(n_labels, n_samples):\n if not 1 < n_labels < n_samples:\n raise ValueError(\"Number of labels is %d. 
Valid values are 2 \"\n \"to n_samples - 1 (inclusive)\" % n_labels)", "def test_label():\n label_path = pjoin(data_path, \"label\", \"lh.BA1.label\")\n label = read_label(label_path)\n # XXX : test more\n assert_true(np.all(label > 0))", "def get_num_of_images(self):", "def images_are_present(file_info):\n currentdir = os.path.join(WORKDIR, file_info['folder'])\n if not os.path.exists(currentdir):\n return False\n count = len([x for x in os.listdir(currentdir) if x.endswith('.png')])\n if count != file_info['size']:\n print([x for x in os.listdir(currentdir) if x.endswith('.png')])\n print('Count does not match')\n print(count)\n print(file_info['size'])\n return False\n return True", "def __len__(self):\n return int(np.ceil(len(self.image_filenames) / (self.batch_size)))", "def __len__(self):\n # print(\"len: \" + str(math.floor(len([name for name in os.listdir(self.imgs_dir) if os.path.isfile(self.imgs_dir+'//'+name)])/self.batch_size)-1)\n return math.floor(len([name for name in os.listdir(self.imgs_dir) if\n os.path.isfile(self.imgs_dir + '//' + name)]) / self.batch_size)", "def _get_img_label(self, path):\n food_items = self.annotations[path]\n tomato_items = [\n item for item in food_items\n if item['id'] in self.tomato_label_ids\n ]\n return 1 if len(tomato_items) > 0 else 0", "def number_of_images_valid():\r\n if number_of_images_a_valid() and number_of_images_b_valid():\r\n return True\r\n else:\r\n return False", "def nb_im(self, code):\n return len(os.listdir(self._im_dir[code]))", "def num_labels(self):\n return len(self.get_labels())", "def num_labels(self):\n return len(self.get_labels())", "def num_labels(self) -> int:\n raise NotImplementedError", "def __len__(self):\r\n return len(self.img_names)", "def correct_batch_size_in_files(self):\n print('checking correct file sizes')\n all_ok = True\n for f in self.data_filenames:\n all_ok *= (np.load(f).shape[0] == self.batch_size)\n if not all_ok:\n break\n print(all_ok)\n return all_ok", "def load_imgsLabels(self, image_paths):\n \n# label = image_paths[-1]\n \n images = self.load_images(image_paths)\n \n images = self.resize_images(images)\n \n images_list = self.greyscale_images(images)\n\n return images_list", "def __len__(self):\n return len(self.labels)", "def check_image_size(image_folder_path, height=None, width=None):\n total_img_list = glob.glob(os.path.join(image_folder_path, \"*\"))\n counter = 0\n for image in tqdm(total_img_list, desc=\"Checking in progress\"):\n try:\n img = cv2.imread(image)\n\n # Review Comments:\n #\n # I assume you were trying to initialize width and height\n # if they are not defined by the caller. 
I have rewritten\n # your code to do this successfully - before you were just\n # comparing the height and width of each image with\n # itself.\n if height is None:\n height = img.shape[1]\n\n if width is None:\n width = img.shape[0]\n\n if not (height == img.shape[1] and width == img.shape[0]):\n counter += 1\n # Review Comments: What exception are you trying to catch here?\n # In general, you should not have a bare except block.\n except:\n print(\"this {} is corrupted\".format(image))\n continue\n return counter", "def __len__(self):\n return len(self.image_file_names)", "def scan_size(self):\n max_memory = 10e9/4 # because 32-bit floats will be used\n memory = 0\n for f in self.filenames:\n img = cv2.imread(f, int(self.color))\n if img is not None:\n m = 1\n for dim in img.shape:\n m *= dim\n memory += m\n else:\n print('error opening %s' % f)\n print('size is %s bytes' % memory)\n return memory <= max_memory", "def scan_size(self):\n max_memory = 10e9/4 # because 32-bit floats will be used\n memory = 0\n for f in self.filenames:\n img = cv2.imread(f, int(self.color))\n if img is not None:\n m = 1\n for dim in img.shape:\n m *= dim\n memory += m\n else:\n print('error opening %s' % f)\n print('size is %s bytes' % memory)\n return memory <= max_memory" ]
[ "0.6857187", "0.6778049", "0.6608997", "0.6470868", "0.6470868", "0.6470868", "0.64671683", "0.63645536", "0.6209617", "0.6172412", "0.6172412", "0.6160495", "0.61398166", "0.6118098", "0.60889083", "0.6088829", "0.60738635", "0.6062706", "0.60482156", "0.60111576", "0.60111576", "0.5988173", "0.59665173", "0.5950841", "0.59455514", "0.5937964", "0.5935239", "0.5887972", "0.5848851", "0.5848851" ]
0.7553603
0
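A hedged, standalone version of the directory-count check in this record. As in the original, it only compares how many entries each directory holds, so files with mismatched names would still pass; the example paths are placeholders.

import os

def images_and_labels_match(image_dir: str, label_dir: str) -> bool:
    # True when both directories contain the same number of entries.
    return len(os.listdir(image_dir)) == len(os.listdir(label_dir))

# Usage: images_and_labels_match("data/images", "data/labels")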
Given the file handle of the file containing image data, it returns a list of the objects contained in the image. Returns objects (list(object)), where object is a dict. Currently, object has only one key, namely 'polygon', which is a list of points in clockwise order.
def _get_objects(self,label_fh):\n    objects = []\n    for line in label_fh.readlines():\n        try:\n            object = {}\n            line = line.replace(u'\ufeff', '')\n            if line != '':\n                x1, y1, x2, y2, x3, y3, x4, y4= [int(i) for i in line.split(',')[:-1]]\n                p1 = (x1, y1)\n                p2 = (x2, y2)\n                p3 = (x3, y3)\n                p4 = (x4, y4)\n                object['polygon'] = [p1,p2,p3,p4]\n                objects.append(object)\n        except:\n            pass\n    return objects
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preprocessing_objects(img_data, hierarchy_mapping, object_file_name='objects.p'):\n\n object_path_token = \"{0}.{1}.{2}\".format(DATA, VISUAL_GENOME, get_name_from_file(object_file_name))\n\n # Check if pickles are already created\n objects_path = FilesManager().get_file_path(object_path_token)\n\n if os.path.isfile(objects_path):\n Logger().log('File is already exist {0}'.format(objects_path))\n objects = FilesManager().load_file(object_path_token)\n return objects\n\n # Bad urls which should be sorted out\n bad_urls = get_bad_urls()\n\n # Get the whole objects from entities\n objects_lst = []\n correct_labels = hierarchy_mapping.keys()\n idx = 0\n for img in img_data:\n\n # Get the url image\n url = img.image.url\n\n # Sorting bad urls\n if url in bad_urls:\n continue\n\n # Get the objects per image\n objects = img.objects\n for object in objects:\n\n # Get the lable of object\n label = object.names[0]\n\n # Check if it is a correct label\n if label not in correct_labels:\n continue\n\n new_object_mapping = ObjectMapping(object.id, object.x, object.y, object.width, object.height, object.names,\n object.synsets, url)\n # Append the new objectMapping to objects_lst\n objects_lst.append(new_object_mapping)\n\n idx += 1\n Logger().log(\"Finished img: {}\".format(idx))\n\n # Pickle objects_lst\n objects_array = np.array(objects_lst)\n # Save the objects files to the disk\n FilesManager().save_file(object_path_token, objects_array)\n return objects_array", "def read_bounding_boxes(filename):\n f = open(filename)\n objects = []\n weight = 0\n height = 0\n for line in f:\n print(line)\n first_word = line.split(';')[0]\n if first_word == \"Dimensions\":\n weight = line.split(';')[1]\n height = line.split(';')[2]\n if first_word == \"Object\":\n objects.append((line.split(';')[1], line.split(';')[2], line.split(';')[4],\n line.split(';')[5], line.split(';')[6], line.split(';')[7]))\n return weight, height, objects", "def extract_object_properties(segmented_image_path, intensity_image_path, image_name, xy_scale, z_scale):\n\n print('Extracting object properties for {image_name}'.format(image_name=image_name))\n\n # import packages needed for object extraction\n from skimage.io import imread\n from scipy.ndimage import label as ndi_label\n from skimage import measure\n\n # read in images\n segmented_image = imread(segmented_image_path)\n intensity_image = imread(intensity_image_path)\n\n # label connected components\n labeled, num_features = ndi_label(segmented_image)\n\n # measure properties\n region_properties = measure.regionprops(labeled, intensity_image = intensity_image)\n\n object_data_list = []\n\n for prop in region_properties:\n\n # apply the z scale and xy scales to the centroid and coordinates lists\n centroid = list(prop.centroid)\n centroid_scaled = [centroid[0] * z_scale, centroid[1]*xy_scale, centroid[2] * xy_scale]\n\n coords = prop.coords.tolist()\n coords_scaled = [[coord[0]*z_scale, coord[1]* xy_scale, coord[2]*xy_scale] for coord in coords ]\n\n # create a dict containing object properties\n object_properties_dict = {\n 'area': int(prop.area),\n 'min_intensity' : int(prop.min_intensity),\n 'max_intensity' : int(prop.max_intensity),\n 'mean_intensity' : int(prop.mean_intensity),\n 'total_intensity': int(prop.intensity_image.sum()),\n 'object_id' : int(prop.label),\n 'name': image_name,\n 'centroid': centroid_scaled,\n 'coordinates': coords_scaled,\n 'intensity_image': prop.intensity_image.tolist()}\n\n object_data_list.append(object_properties_dict)\n\n return 
object_data_list", "def get_objects(self, image_np: np.array,\n image: Image) -> Tuple[Dict, object]:\n pass", "def build_from_file(path):\n with open(path) as obj:\n raw_file = obj.read()\n file_lines = [line.split(\" \") for line in raw_file.split(\"\\n\")]\n\n vertices = {}\n faces = []\n for number, line in enumerate(file_lines):\n if line[0] == \"v\":\n vertices[number + 1] = tuple(map(float, line[1:]))\n if line[0] == \"f\":\n face = []\n for index in line[1:]:\n face.append(vertices[int(index)])\n face.append(vertices[int(line[1])])\n faces.append(face)\n return Object(points=faces)", "def readFromFile(self, infile):\n\n self.mMapComponent2Object = {}\n self.mMapObject2Component = {}\n\n for line in infile:\n if line[0] == \"#\":\n continue\n\n data = line[:-1].split(\"\\t\")\n\n obj_id, obj_start, obj_end, ncoms, com_type, com_id = data[:6]\n\n if com_type == \"N\":\n continue\n com_start, com_end, orientation = data[6:9]\n\n obj_start, obj_end = int(obj_start) - 1, int(obj_end)\n com_start, com_end = int(com_start) - 1, int(com_end)\n\n orientation = orientation in (\"+\", \"0\", \"na\")\n\n if com_start != 0:\n raise ValueError(\"non zero com_start\")\n\n object = ObjectPosition()\n object.mId = obj_id\n object.start = obj_start\n object.end = obj_end\n object.mOrientation = orientation\n\n self.mMapComponent2Object[com_id] = object", "def localize_objects(self,path):\n\t\tfrom google.cloud import vision\n\t\t\n\t\tclient = vision.ImageAnnotatorClient()\n\n\t\twith open(path, 'rb') as image_file:\n\t\t\tcontent = image_file.read()\n\t\timage = vision.types.Image(content=content)\n\n\t\tobjects = client.object_localization(\n\t\t\timage=image).localized_object_annotations\n\t\treturn objects\n\n\t\t#print('Number of objects found: {}'.format(len(objects)))\n\t\t#for object_ in objects:\n\t\t#\tprint('\\n{} (confidence: {})'.format(object_.name, object_.score))\n\t\t#\tprint('Normalized bounding polygon vertices: ')\n\t\t#\tfor vertex in object_.bounding_poly.normalized_vertices:\n\t\t#\t\tprint(' - ({}, {})'.format(vertex.x, vertex.y))", "def load_geojsons2(filepath):\n jsons = glob(os.path.join(filepath, '*.json'))\n features = []\n for json_path in tqdm(jsons, desc='loading geojson files'):\n with open(json_path) as f:\n data_dict = json.load(f)\n features.append(data_dict)\n\n obj_coords = list()\n image_ids = list()\n class_indices = list()\n class_names = list()\n\n for feature in tqdm(features, desc='extracting features'):\n for i in range(len(feature['object'])):\n if feature['object'][i]['label'] != 'gbg':\n image_ids.append(feature['filename'])\n obj_coords.append(feature['object'][i]['points'])\n class_indices.append(int(feature['object'][i]['label'][-1])-1)\n class_names.append(feature['object'][i]['label'])\n \n return image_ids, obj_coords, class_indices, class_names", "def load_geojsons(filepath):\n jsons = sorted(glob(os.path.join(filepath, '*.json')))\n features = []\n for json_path in tqdm(jsons, desc='loading geojson files'):\n with open(json_path) as f:\n data_dict = json.load(f)\n features.append(data_dict)\n\n obj_coords = list()\n image_ids = list()\n class_indices = list()\n class_names = list()\n\n for feature in tqdm(features, desc='extracting features'):\n for i in range(len(feature['object'])):\n if feature['object'][i]['label'] != 'gbg':\n try:\n image_ids.append(feature['file_name'])\n obj_coords.append(feature['object'][i]['box'])\n except:\n image_ids.append(feature['filename'])\n obj_coords.append(feature['object'][i]['points'])\n\n 
class_indices.append(int(feature['object'][i]['label'][-1])-1)\n class_names.append(feature['object'][i]['label'])\n \n return image_ids, obj_coords, class_indices, class_names", "def make_int_object_list(self):\n from libtbx import easy_pickle as ep\n\n if self.params.cctbx.selection.select_only.grid_search_path == None:\n int_dir = misc.set_base_dir('integration', True)\n else:\n int_dir = self.params.cctbx.selection.select_only.grid_search_path\n\n img_objects = []\n\n # Inspect integration folder for image objects\n for root, dirs, files in os.walk(int_dir):\n for filename in files:\n found_file = os.path.join(root, filename)\n if found_file.endswith(('int')):\n obj = ep.load(found_file)\n img_objects.append(obj)\n\n # Pick a randomized subset of images\n if self.params.advanced.random_sample.flag_on and \\\n self.params.advanced.random_sample.number < len(img_objects):\n gs_img_objects = self.select_random_subset(img_objects)\n else:\n gs_img_objects = img_objects\n\n return gs_img_objects", "def get_keypoints(self, image_path, image_data):\r\n print 'Using serializer', Pyro4.config.SERIALIZER\r\n\r\n base_path, ext = os.path.splitext(image_path)\r\n image_name = 'image' + ext\r\n img_local_path = os.path.join(self.local_path, image_name)\r\n\r\n with open(img_local_path, 'wb') as f:\r\n f.write(image_data)\r\n\r\n self.update_imagelist(img_local_path)\r\n self.detect_face()\r\n bbox_data = self.get_bbox_data()\r\n\r\n if self.is_face_detected(bbox_data):\r\n self.format_bbox_file(image_name, bbox_data)\r\n self.detect_keypoints()\r\n keypoints = self.kpts_from_binary()\r\n else:\r\n keypoints = []\r\n\r\n return keypoints", "def get_paintings(orb):\n paintings = []\n with open('data/data.csv', 'r') as f:\n f_reader = csv.DictReader(f)\n for row in f_reader:\n im = cv2.imread(f\"{PAINTINGS_FOLDER}/{row['Image']}\")\n kp, descr = compute_kp_descr(im, orb)\n image = Image(filename=row['Image'], image=im, descriptors=descr, keypoints=kp)\n image.title = row['Title']\n image.author = row['Author']\n image.room = row['Room']\n paintings.append(image)\n return paintings", "def loadjson(path, objectsofinterest, img):\n with open(path) as data_file:\n data = json.load(data_file)\n # print (path)\n pointsBelief = []\n boxes = []\n points_keypoints_3d = []\n points_keypoints_2d = []\n pointsBoxes = []\n poses = []\n centroids = []\n\n translations = []\n rotations = []\n points = []\n\n for i_line in range(len(data['objects'])):\n info = data['objects'][i_line]\n if not objectsofinterest is None and \\\n not objectsofinterest in info['class'].lower():\n continue\n\n box = info['bounding_box']\n boxToAdd = []\n\n boxToAdd.append(float(box['top_left'][0]))\n boxToAdd.append(float(box['top_left'][1]))\n boxToAdd.append(float(box[\"bottom_right\"][0]))\n boxToAdd.append(float(box['bottom_right'][1]))\n boxes.append(boxToAdd)\n\n boxpoint = [(boxToAdd[0], boxToAdd[1]), (boxToAdd[0], boxToAdd[3]),\n (boxToAdd[2], boxToAdd[1]), (boxToAdd[2], boxToAdd[3])]\n\n pointsBoxes.append(boxpoint)\n\n # 3dbbox with belief maps\n points3d = []\n\n pointdata = info['projected_cuboid']\n for p in pointdata:\n points3d.append((p[0], p[1]))\n\n # Get the centroids\n pcenter = info['projected_cuboid_centroid']\n\n points3d.append((pcenter[0], pcenter[1]))\n pointsBelief.append(points3d)\n points.append(points3d + [(pcenter[0], pcenter[1])])\n centroids.append((pcenter[0], pcenter[1]))\n\n # load translations\n location = info['location']\n translations.append([location[0], location[1], location[2]])\n\n # 
quaternion\n rot = info[\"quaternion_xyzw\"]\n rotations.append(rot)\n\n return {\n \"pointsBelief\": pointsBelief,\n \"rotations\": rotations,\n \"translations\": translations,\n \"centroids\": centroids,\n \"points\": points,\n \"keypoints_2d\": points_keypoints_2d,\n \"keypoints_3d\": points_keypoints_3d,\n }", "def parse_rec(filename):\n tree = et.parse(filename)\n objects = []\n for obj in tree.findall('object'):\n obj_struct = {}\n obj_struct['name'] = obj.find('name').text\n obj_struct['pose'] = obj.find('pose').text\n obj_struct['truncated'] = int(obj.find('truncated').text)\n obj_struct['difficult'] = int(obj.find('difficult').text)\n bbox = obj.find('bndbox')\n obj_struct['bbox'] = [int(bbox.find('xmin').text) - 1,\n int(bbox.find('ymin').text) - 1,\n int(bbox.find('xmax').text) - 1,\n int(bbox.find('ymax').text) - 1]\n objects.append(obj_struct)\n\n return objects", "def read_shp(filename):\n sr = shapefile.Reader(filename)\n \n if sr.shapeType == shapefile.POLYGON:\n shapes = sr.shapes()\n geometries = [Polygon(shape.points) for shape in shapes]\n \n fields = sr.fields[:]\n if fields[0][0] == 'DeletionFlag':\n fields.pop(0)\n fields = [field[0] for field in fields] # extract field name only\n \n records = []\n for record in sr.records():\n for i, value in enumerate(record):\n try:\n record[i] = float(value) # convert record values to numeric...\n except ValueError:\n pass # ... if possible\n \n records.append(record)\n \n return (geometries, records, fields)\n \n elif sr.shapeType == shapefile.POLYLINE:\n shapes = sr.shapes()\n geometries = [LineString(shape.points) for shape in shapes]\n \n fields = sr.fields[:] # [:] = duplicate field list\n if fields[0][0] == 'DeletionFlag':\n fields.pop(0)\n fields = [field[0] for field in fields] # extract field name only\n \n records = []\n for record in sr.records():\n for i, value in enumerate(record):\n try:\n record[i] = float(value) # convert record values to numeric...\n except ValueError:\n pass # ... 
if possible\n \n records.append(record)\n \n return (geometries, records, fields)\n \n \n elif sr.shapeType == shapefile.MULTIPOINT:\n raise NotImplementedError\n \n else:\n raise NotImplementedError", "def loadjson(path, objectsofinterest, img):\n with open(path) as data_file: \n data = json.load(data_file)\n # print (path)\n pointsBelief = []\n boxes = []\n points_keypoints_3d = []\n points_keypoints_2d = []\n pointsBoxes = []\n poses = []\n centroids = []\n\n translations = []\n rotations = []\n points = []\n\n for i_line in range(len(data['objects'])):\n info = data['objects'][i_line]\n if not objectsofinterest is None and \\\n not objectsofinterest in info['class'].lower():\n continue \n \n box = info['bounding_box']\n boxToAdd = []\n\n boxToAdd.append(float(box['top_left'][0]))\n boxToAdd.append(float(box['top_left'][1]))\n boxToAdd.append(float(box[\"bottom_right\"][0]))\n boxToAdd.append(float(box['bottom_right'][1]))\n boxes.append(boxToAdd)\n\n boxpoint = [(boxToAdd[0],boxToAdd[1]),(boxToAdd[0],boxToAdd[3]),\n (boxToAdd[2],boxToAdd[1]),(boxToAdd[2],boxToAdd[3])]\n\n pointsBoxes.append(boxpoint)\n \n # 3dbbox with belief maps\n points3d = []\n \n pointdata = info['projected_cuboid']\n for p in pointdata:\n points3d.append((p[0],p[1]))\n\n # Get the centroids\n pcenter = info['projected_cuboid_centroid']\n\n points3d.append ((pcenter[0],pcenter[1]))\n pointsBelief.append(points3d)\n points.append (points3d + [(pcenter[0],pcenter[1])])\n centroids.append((pcenter[0],pcenter[1]))\n\n # load translations\n location = info['location']\n translations.append([location[0],location[1],location[2]])\n\n # quaternion\n rot = info[\"quaternion_xyzw\"]\n rotations.append(rot)\n\n return {\n \"pointsBelief\":pointsBelief, \n \"rotations\":rotations,\n \"translations\":translations,\n \"centroids\":centroids,\n \"points\":points,\n \"keypoints_2d\":points_keypoints_2d,\n \"keypoints_3d\":points_keypoints_3d,\n }", "def getCentroids(self):\n\t\timages=[]\n\t\tcentroids_dict={}\n\t\t#Find all images of the objects\n\t\tfor file in os.listdir(objects_dir):\n\t \tif file.endswith(self.extension):\n\t \timages.append(os.path.join(self.objects_dir, file))", "def detect_objects_on_image(image_path, detections_file='pickles/bounding_boxes.pickle'):\n image_name = os.path.basename(image_path)\n try:\n with open(detections_file, 'rb') as handle:\n detections = pickle.load(handle)\n except FileNotFoundError:\n print('Detections file not found!')\n detections = {}\n if image_name in detections:\n print(image_name, 'is already in detections file!')\n print('Bounding boxes from file', detections[image_name])\n return detections[image_name]\n else:\n print('Adding to detections file', image_name)\n _, _, bound_boxes = run_yolo_onpic(image_path)\n detections[image_name] = bound_boxes\n print('Bounding boxes', bound_boxes)\n fileObject = open(detections_file, 'wb')\n pickle.dump(detections, fileObject)\n fileObject.close()\n return bound_boxes", "def shapefile_generator(filename):\n with fiona.open(filename) as collection:\n for item in collection:\n item['geometry'] = transform_geom(\n collection.meta['crs'], 'epsg:4326', item['geometry'])\n yield item", "def localize_objects(path):\n from google.cloud import vision\n client = vision.ImageAnnotatorClient()\n\n with open(path, 'rb') as image_file:\n content = image_file.read()\n image = vision.types.Image(content=content)\n\n objects = client.object_localization(\n image=image).localized_object_annotations\n\n print('Number of objects found: 
{}'.format(len(objects)))\n for object_ in objects:\n print('\\n{} (confidence: {})'.format(object_.name, object_.score))\n print('Normalized bounding polygon vertices: ')\n for vertex in object_.bounding_poly.normalized_vertices:\n print(' - ({}, {})'.format(vertex.x, vertex.y))", "def read_from(self, infile: Path):\n logger.info(f'Processing {infile.name}...')\n with BioReader(infile) as reader:\n self.metadata = reader.metadata\n\n tile_size, num_slices, num_cols, num_rows = self._get_iteration_params(reader.Z, reader.Y, reader.X)\n tile_count = 0\n for z in range(0, reader.Z, tile_size):\n z_max = min(reader.Z, z + tile_size)\n for y in range(0, reader.Y, tile_size):\n y_max = min(reader.Y, y + tile_size)\n for x in range(0, reader.X, tile_size):\n x_max = min(reader.X, x + tile_size)\n\n tile = numpy.squeeze(reader[y:y_max, x:x_max, z::z_max, 0, 0])\n tile = (tile != 0).astype(numpy.uint8)\n if tile.ndim == 2:\n tile = tile[numpy.newaxis, :, :]\n else:\n tile = tile.transpose(2, 0, 1)\n self.__polygon_set.add_tile(tile, (z, y, x))\n tile_count += 1\n logger.debug(f'added tile #{tile_count} ({z}:{z_max}, {y}:{y_max}, {x}:{x_max})')\n logger.info(f'Reading Progress {100 * tile_count / (num_slices * num_cols * num_rows):6.3f}%...')\n \n logger.info('digesting polygons...')\n self.__polygon_set.digest()\n\n self.num_polygons = self.__polygon_set.len()\n logger.info(f'collected {self.num_polygons} polygons')\n return self", "def unpack_annotation(path):\n buffer = []\n with open(path, 'r') as file:\n lines = file.read()\n\n lines = lines.splitlines()\n for line in lines:\n if not line.startswith('#') and line:\n buffer.append(line)\n\n # Filename to match annotation with photo\n filename = ''\n for line in buffer:\n if 'Image filename' in line:\n filename = line.replace(' ', '').split(':')[1]\n\n # How many person-like objects in photo\n how_many = 0\n for line in buffer:\n if 'Objects with ground truth' in line:\n how_many = int((line.replace(' ', '').split(':')[1][0]))\n break\n\n person_id = []\n for i in range(how_many):\n person_id.append(f'{i+1} \"PASperson\"')\n\n # Centers of objects\n centers = []\n which_one = 0\n for line in buffer:\n if which_one == how_many:\n break\n if person_id[which_one] + ' (X, Y)' in line:\n buf = line.replace(\" \", \"\").split(':')[1]\n buf = buf.replace('(', \"\").replace(')', '').split(',')\n centers.append((int(buf[0]), int(buf[1])))\n which_one += 1\n\n # Bounding boxes of objects\n boxes = []\n which_one = 0\n for line in buffer:\n if which_one == how_many:\n break\n if person_id[which_one] + ' (Xmin, Ymin)' in line:\n buf = line.replace(\" \", \"\").split(':')[1]\n buf = buf.replace('(', \"\").replace(')', '').split('-')\n buf0 = buf[0].split(',')\n buf1 = buf[1].split(',')\n boxes.append((int(buf0[0]), int(buf0[1]), int(buf1[0]), int(buf1[1])))\n which_one += 1\n\n return filename, how_many, centers, boxes", "def photo_to_list(path):\n im = Image.open(path)\n im = im.convert(\"L\")\n ens = {a for a in list(im.getdata())}\n width, height = im.size\n im = list(im.getdata())\n tab = [[im[y*width+x] for x in range(width)] for y in range(height)]\n return tab", "def _svg_to_polygons(cdata):\n polygons = []\n groups = parse(cdata['image'])\n\n #iterating this dict in a strange way, need to refactor maybe\n for g in groups:\n for path in groups[g]:\n #this list comprehension gets the region coordinates\n points = ([(p[0] * cdata['width_ratio'] + cdata['start_pos'], p[1]\n * cdata['height_ratio']) for p in path[1]])\n\n 
polygons.append({_convert_state_to_region(g):points})\n\n return polygons", "def read(file):\n \n ole = OleFileIO(file)\n stream = ole.openstream(\"FileHeader\")\n \n objects = list()\n while True:\n length = stream.read(4)\n if not length:\n break\n (length,) = struct.unpack(\"<I\", length)\n \n properties = stream.read(length - 1)\n obj = dict()\n for property in properties.split(b\"|\"):\n if not property:\n # Most (but not all) property lists are\n # prefixed with a pipe \"|\",\n # so ignore an empty property before the prefix\n continue\n \n (name, value) = property.split(b\"=\", 1)\n name = name.decode(\"ascii\")\n existing = obj.get(name)\n if existing not in (None, value):\n msg = \"Conflicting duplicate: {!r}, was {!r}\"\n warn(msg.format(property, existing))\n obj[name] = value\n \n objects.append(obj)\n \n # Skip over null terminator byte\n stream.seek(+1, SEEK_CUR)\n \n return objects", "def geotiff_read(ifile,metaData):\r\n\r\n file = gdal.Open(ifile, GA_ReadOnly)\r\n\r\n projection = file.GetProjection()\r\n src = osr.SpatialReference()\r\n src.ImportFromWkt(projection)\r\n proj = src.ExportToWkt()\r\n\r\n Nx = file.RasterXSize\r\n Ny = file.RasterYSize\r\n\r\n trans = file.GetGeoTransform()\r\n\r\n dx = trans[1]\r\n dy = trans[5]\r\n\r\n if metaData == \"A\":\r\n\r\n xp = np.arange(Nx)\r\n yp = np.arange(Ny)\r\n\r\n (Xp, Yp) = np.meshgrid(xp,yp)\r\n\r\n X = trans[0] + (Xp+0.5)*trans[1] + (Yp+0.5)*trans[2] #FIXME: bottleneck!\r\n Y = trans[3] + (Xp+0.5)*trans[4] + (Yp+0.5)*trans[5]\r\n\r\n if metaData == \"P\":\r\n\r\n xp = np.arange(Nx)\r\n yp = np.arange(Ny)\r\n\r\n (Xp, Yp) = np.meshgrid(xp,yp)\r\n\r\n X = trans[0] + Xp*trans[1] + Yp*trans[2] #FIXME: bottleneck!\r\n Y = trans[3] + Xp*trans[4] + Yp*trans[5]\r\n\r\n band = file.GetRasterBand(1)\r\n\r\n Z = band.ReadAsArray()\r\n\r\n dx = np.abs(dx)\r\n dy = np.abs(dy)\r\n\r\n return X, Y, Z, dx, dy, proj", "def read_polygon_shapefile(filename):\n result = cpp_read_polygon_shapefile(filename)\n f_pos = Series(result[0], name=\"f_pos\")\n r_pos = Series(result[1], name=\"r_pos\")\n return (\n f_pos,\n r_pos,\n DataFrame({\"x\": result[2], \"y\": result[3]}),\n )", "def _read_object_list():\n\n scene_object_list = []\n logger.info(\"Reading '%s' config file for VIMAN poster\" % object_config_file)\n\n try:\n fp = open(object_config_file, \"r\")\n for line in fp:\n match = re.search('object (\\w+)', line)\n if match != None:\n scene_object_list.append(match.group(1))\n logger.debug(\"\\t- %s\" % match.group(1))\n fp.close()\n except IOError as detail:\n logger.debug(detail)\n logger.info(\"ARToolkit tag library not found. 
Skipping it.\")\n return []\n\n return scene_object_list", "def readobject(filename):\n # import cPickle as pickle\n with open(filename, 'rb') as input_file:\n return pickle.load(input_file)", "def read_geometry(geometry_filename, quiet=True):\n\n result_dict = {}\n\n extension = os.path.splitext(geometry_filename)[1]\n # read the geometry differently depending on the file format\n if extension == \".geom\":\n parser = GeometryFileParser(geometry_filename)\n result_dict = parser.pixel_map_for_cxiview()\n\n elif extension == \".h5\":\n x, y, r, dx_m = read_pixelmap(geometry_filename)\n coffset = float('nan')\n # clen is not neccessarily an integer so we choose as default the None \n # type\n clen = None\n\n # find the smallest size of cspad_geom that contains all\n # xy values but is symmetric about the origin\n M = 2 * int(max(abs(x.max()), abs(x.min()))) + 2\n N = 2 * int(max(abs(y.max()), abs(y.min()))) + 2\n\n\n # convert x y values to i j values Minus sign for y-axis because Python\n # takes (0,0) in top left corner instead of bottom left corner\n\n # Note to Valerio: Do not add the offset to convert to image\n # coordinates staring at (0,0) as we may want the actual pixel\n # coordinates This means do not center array here --> it is done in\n # pixel_remap instead Returning actual coordinates (x,y) is better for\n # other operations such as radial averages\n\n x = x\n y = -y\n img_shape = (M, N)\n\n result_dict = {\n 'x' : x.flatten(),\n 'y' : y.flatten(),\n 'r' : r.flatten(),\n 'dx' : dx_m,\n 'coffset' : coffset,\n 'shape' : img_shape,\n 'clen' : clen\n }\n else:\n print(\"Error reading geometry file: \" + geometry_filename) \n print(\"Unknown geometry file format: \" + extension)\n exit()\n\n # Print a sanity check unless suppressed\n if not quiet:\n print('----------')\n print('Geometry info:')\n print('X range (pix): ', x.min(), x.max())\n print('Y range (pix): ', y.min(), y.max())\n print('R range (pix): ', r.min(), r.max())\n print('Pixel size (m): %4.6f' % (dx_m))\n print(\"Geometry shape: \", x.shape)\n print(\"Geometry elements: \", x.flatten().shape)\n print(\"Assembled image size: \", img_shape)\n\n return result_dict" ]
[ "0.62237", "0.61571246", "0.5929641", "0.59224886", "0.58840525", "0.5877584", "0.5760753", "0.57414246", "0.56997913", "0.56704485", "0.5646486", "0.5590826", "0.55823386", "0.5573562", "0.5562786", "0.5545621", "0.55408007", "0.5518088", "0.5517315", "0.5512284", "0.5482167", "0.5481303", "0.54740804", "0.5457734", "0.54446125", "0.5443305", "0.54425764", "0.5436497", "0.5433423", "0.5416434" ]
0.69687027
0
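The same parsing logic as the document above, rewritten as a self-contained sketch: the bare except is narrowed to ValueError, and an io.StringIO stands in for the label file handle. The sample line (eight coordinates plus a trailing field) follows the format the original split(',')[:-1] expects.

import io

def get_objects(label_fh):
    objects = []
    for line in label_fh:
        line = line.replace("\ufeff", "").strip()
        if not line:
            continue
        try:
            x1, y1, x2, y2, x3, y3, x4, y4 = [int(v) for v in line.split(",")[:-1]]
        except ValueError:
            continue  # skip malformed lines instead of swallowing all errors
        objects.append({"polygon": [(x1, y1), (x2, y2), (x3, y3), (x4, y4)]})
    return objects

sample = io.StringIO("10,10,50,10,50,40,10,40,text\n")
print(get_objects(sample))  # [{'polygon': [(10, 10), (50, 10), (50, 40), (10, 40)]}]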
Given the list of objects, it returns an array mapping object ids to their respective classes. Background has class 0 and text has class 1.
def _get_object_classes(self,objects):\n    object_class = [1 for object in objects]\n    object_class.insert(0, 0)\n    return object_class
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_class_list(self):\n t = []\n for cls in self.classes:\n if not self.is_opaque(cls.classobj):\n t.append(cls)\n elif cls.parents or cls.childs:\n t.append(cls)\n \n return t", "def process_class_list(self, module, classes):", "def _extract_class(labels: List[int], class_index: int):\n class_ids = [i for i, label in enumerate(labels) if label == class_index]\n return class_ids", "def unique_classes(srcfile, listfile):\n cls_list = []\n with open(listfile, 'r') as f:\n lines = f.readlines()\n\n for line in lines:\n xml_file = srcfile.format(line.strip())\n\n tree = ET.parse(xml_file)\n objs = tree.findall('object')\n\n for ix, obj in enumerate(objs):\n cls = obj.find('name').text\n if cls in cls_list:\n pass\n else:\n cls_list.append(cls)\n print(cls)", "def _get_class(self, obj):\n\n object_type = obj.object_type\n\n 'Background class'\n object_class = 0\n\n # Don't care classes\n if object_type in ['DontCare', 'Person_sitting'] or obj.truncation > 0.75 or obj.occlusion > 1:\n object_class = 1\n\n # Vehicle classes\n elif object_type in ['Car', 'Van']:\n object_class = 2\n\n # Pedestrian class\n elif object_type in ['Pedestrian']: # TODO: Consider change this with ==\n object_class = 3\n\n # Cyclist class\n elif object_type in ['Cyclist']: # TODO: Consider change this with ==\n object_class = 4\n\n return object_class", "def get_classes(html):\n # elements = html.find_all(\"span\", \"code\")\n # titles = html.find_all(\"span\", \"title\")\n # classes = []\n # for i in range(len(elements)):\n # item = elements[i]\n # tit = titles[i]\n # classes += [(item.text.replace('\\xa0', ' '), tit.text.replace('\\xa0', ' '))]\n # return classes", "def classes(attrs):\n return attrs.get('class', '').split()", "def _extract_classes(labels: List[int], class_indexes: List[int]) -> List[int]:\n indexes = set()\n for class_id in class_indexes:\n filtered = DatasetSeObjectsUnit._extract_class(labels, class_id)\n indexes = set.union(indexes, set(filtered))\n\n result = list(indexes)\n result.sort()\n return result", "def image_classes():\n\n image_data_path = PROJECT_ROOT + \"/data/CUB_200_2011/\"\n\n # <class_id> <class_name>\n classes = open(image_data_path + \"classes.txt\").readlines()\n classes = [i.strip().split() for i in classes]\n\n # <image_id> <class_id>\n labels = open(image_data_path + \"image_class_labels.txt\").readlines()\n labels = [i.strip().split() for i in labels]\n\n class_ids = {}\n for i in classes:\n class_ids[i[1]] = int(i[0])\n\n label_ids = {}\n for i in labels:\n label_ids[int(i[0])] = int(i[1])\n\n return class_ids, label_ids", "def classify(list_of_sets, sort=True):\n classifier = Classifier(sort=sort)\n classifier.update(list_of_sets)\n return classifier.getClasses(), classifier.getMapping()", "def count_class(srcfile, listfile):\n cls_list = []\n\n # open the list file\n with open(listfile, 'r') as f:\n lines = f.readlines()\n\n # check each file in the list\n for line in lines:\n xml_file = srcfile.format(line.strip())\n\n tree = ET.parse(xml_file)\n\n # objs is all the objects in the xml\n objs = tree.findall('object')\n\n # find the class name in the object, and add it to the cls list\n for ix, obj in enumerate(objs):\n cls = str(obj.find('name').text)\n cls_list.append(cls)\n\n # find the keys and sort, count the number of boxes of the keys\n if len(cls_list) > 0:\n cls_list.sort()\n import numpy as np\n cls_arr = np.array(cls_list)\n cls1 = list(set(cls_list))\n print('unsort classes is:', cls1)\n cls1.sort()\n print('sorted classes is:', cls1)\n classes = 
np.unique(cls_arr)\n print('the class number is:', classes.shape[0])\n print('----------------------------')\n print('the number of each class:')\n for i in range(0, classes.shape[0]):\n # print(classes[i], cls_list.count(classes[i]))\n print(classes[i], ':', np.where(cls_arr==classes[i])[0].shape[0])\n print('----------------------------')\n\n print('the number of all the boxes is:', len(cls_list))\n return cls_list", "def classes(self) -> Iterable[GDScriptClass]:\n for item in self._classes_by_type_id.values():\n yield item", "def classes(self):\n if not hasattr(self, '_unique_classes'):\n # build when we don't have\n self._unique_classes = self.data['label'].unique()\n self._unique_classes.sort()\n\n ret = self._unique_classes\n return ret", "def classes(self) -> List[Any]:\n return list(self.label_counts.keys())", "def get_classes():\n file_name = 'imagenet_class_index.json'\n file_origin = os.path.join(FILE_PATH, file_name)\n file_path = get_file(file_name, file_origin, cache_subdir='models')\n with open(file_path) as f:\n class_dict = json.load(f)\n return [class_dict[str(i)][1] for i in range(len(class_dict))]", "def get_classes_with_colors(self):\n i = 0\n out_classes = ()\n classes = super(NamedEntityRecognizerModel, self).get_classes()\n\n for c in classes:\n if len(c) != 3:\n c += (self.default_colors[i],)\n i += 1\n out_classes += (c,)\n\n return (\n (self.outside_class, self.outside_class_display, self.outside_color),\n ) + out_classes", "def classes(self):\n if self.classname:\n return [self.classname]\n return []", "def _class_count(objects):\n\n totals = {}\n for obj in objects:\n try:\n cls = obj.__class__\n except AttributeError:\n cls = type(obj)\n name = \"%s.%s\" % (cls.__module__, cls.__name__)\n try:\n totals[name].append(obj)\n except KeyError:\n totals[name] = [obj]\n\n totals = totals.items()\n totals.sort(lambda a,b: cmp(len(a[1]),len(b[1])))\n totals = totals[-20:] # Is this a reasonable filter?\n return totals", "def get_classes(engine: Engine) -> Dict[str, PlayableClass]:\n\n classes = engine.get_classes()\n assert classes is not None\n\n class_objs = {}\n for class_idx_data in classes:\n class_data = PlayableClass(engine, class_idx_data[\"id\"])\n class_objs[class_data.to_serialize[\"slug\"]] = class_data\n return class_objs", "def _classification(text_path_list, id_list, label_list):\n textnum = len(text_path_list)\n batched_num = ((textnum - 1) // classify.BATCH_SIZE + 1) * classify.BATCH_SIZE\n for i in range(batched_num - textnum):\n text_path_list.append(text_path_list[0])\n id_list.append(id_list[0])\n annotations = classify_obj.inference(text_path_list, id_list, label_list) #\n return annotations[0:textnum]", "def _class_list(parent, section, objects, refs):\n\n sec = etree.SubElement(parent, section, count=str(len(objects)))\n\n for cls, objs in _class_count(objects):\n obj = etree.SubElement(sec, \"Object\", type=cls, count=str(len(objs)))\n if refs:\n _class_list(obj, \"Referrers\", gc.get_referrers(*objs), False)", "def get_class_id_list_from_class_list(self):\n tr_list = self.soup.find(\n id='main-content'\n ).table.tbody.find_all('tr')\n href_list = []\n for tr in tr_list:\n qs = urllib.parse.urlparse(tr.find(title='Edit')['href'])[4]\n id_ = urllib.parse.parse_qs(qs)['id'][0]\n href_list.append(id_)\n return href_list", "def getAllCls(cls):\n newlist = list(clslist)\n return newlist", "def get_classes(self):\n\n # Sort them.\n classes = ['Safe','Violence','Gun','Cold_Arms','Smoking','Kissing']\n classes = sorted(classes)\n\n # Return.\n if 
self.class_limit is not None:\n return classes[:self.class_limit]\n else:\n return classes", "def constructClassTable(G, classes):\n res = dict((c, set()) for c in classes)\n for v, data in G.nodes(data=True):\n c = data['class']\n if c in classes:\n res[c].add(v)\n return res", "def detect_objects(image, threshold, classes_incl=None):\n set_input_tensor(image)\n interpreter.invoke()\n\n # Get all output details\n boxes = get_output_tensor(0)\n classes = get_output_tensor(1)\n scores = get_output_tensor(2)\n count = int(get_output_tensor(3))\n\n results = []\n for i in range(count):\n if scores[i] >= threshold:\n result = {\n 'bounding_box': boxes[i],\n 'class_id': int(classes[i]),\n 'score': scores[i]\n }\n if not classes_incl:\n results.append(result)\n elif classes[i] in classes_incl:\n results.append(result)\n return results", "def get_instances_of_class(cls, folder):\n data = list()\n for _, _, filenames in os.walk(folder):\n for filename in filenames:\n if filename.endswith(\".jpg\"):\n last = filename.split(\"/\")[-1]\n if re.match(cls, last):\n data.append(last)\n return data", "def get_classes(self, lines):\n result = []\n classes = self._split_lines(lines, Class.TITLE_MARKER)\n for c in classes:\n signature = self._get_group_title(c, Class.TITLE_MARKER)\n name, parent = self._split_title(signature)\n docstring = self.get_docstring(c)\n methods = self.get_methods(c)\n class_ = Class(name, parent, docstring, methods)\n if class_.is_public() or self.show_nonpublic:\n result.append(class_)\n return result", "def get_classes(self):\n out_classes = ()\n classes = super(NamedEntityRecognizerModel, self).get_classes()\n\n for c in classes:\n out_classes += (c[:2],)\n\n return ((self.outside_class, self.outside_class_display),) + out_classes", "def filter_classes(class_ints, class_list, class_filt):\n class_names = [class_list[int(c)] for c in class_ints]\n filter = [name in class_filt for name in class_names]\n return np.array(filter)" ]
[ "0.6236715", "0.6033983", "0.60268164", "0.6010577", "0.59391886", "0.59230375", "0.58449787", "0.5836437", "0.5769628", "0.5737785", "0.5726809", "0.567599", "0.5665862", "0.56647223", "0.56467086", "0.55916905", "0.55804026", "0.55234677", "0.55206275", "0.5503181", "0.5483373", "0.5461113", "0.54564136", "0.54321223", "0.54273474", "0.541432", "0.5414122", "0.54061365", "0.53952616", "0.5390848" ]
0.64411694
0
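A runnable restatement of the class-array construction in this record: every detected object maps to class 1 (text) and a single background entry (class 0) is prepended.

def get_object_classes(objects):
    object_class = [1 for _ in objects]
    object_class.insert(0, 0)  # background is always class 0, at index 0
    return object_class

print(get_object_classes([{"polygon": []}, {"polygon": []}]))  # [0, 1, 1]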
Make an Orders request, store the page count and process the response data.
def first_request(self):\n    response_data = self.make_order_request(1)\n    self.page_count = response_data[self.DATA][self.LAST_PAGE]\n    self.add_orders(response_data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def request_orders(self):\r\n if self.use_http():\r\n self.enqueue_http_request(\"money/orders\", {}, \"orders\")\r\n else:\r\n self.send_signed_call(\"private/orders\", {}, \"orders\")", "def make_order_request(self, page):\n return api_methods.Orders(\n page=page,\n per_page=self.PER_PAGE,\n from_date=self.from_date,\n start_date=self.start_date,\n end_date=self.end_date,\n deal_id=self.deal_id,\n ).call()", "async def handle_get_active_orders_response(self, response: RequesterResponse\n ) -> HitbtcOrders:", "def get_order_count(self):\n resp = self.app.get('/orders')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)", "def orders ( self, block: bool = True ):\n\tresult = OutstandingOrders(\n\t\tauth\t\t= self.auth,\n\t\taccount_nbr = self.account_nbr,\n\t\tblock\t\t= block\n\t).request()\n\n\treturn result", "def add_orders(self, response_data):\n orders = response_data[self.DATA][self.DATA]\n for order in orders:\n self.orders.append(self.process_order_data(order))", "def get_orders(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), params={**kwargs})", "def test_get_order_list(self):\n self._create_orders(5)\n resp = self.app.get('/orders')\n print(resp.data)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(len(data), 5)", "def send_order(self, p_order, p_in_out, count):\n pass", "def paginated_handling(self) -> global___Snippet.PaginatedResponseHandling:", "async def place_order(request: web.Request, body) -> web.Response:\n body = Order.from_dict(body)\n return web.Response(status=200)", "def post(cls):\n data = request.get_json() # token + list of item ids [1, 2, 3, 5, 5, 5]\n items = []\n item_id_quantities = Counter(data[\"item_ids\"])\n\n # Iterate over items and retrieve them from the database\n for _id, _count in item_id_quantities.most_common():\n item = ItemModel.find_by_id(_id)\n if not item:\n return {\"message\": gettext(\"order_item_by_id_not_found\").format(_id)}, 404\n \n items.append(ItemInOrder(item_id=_id, quantity=_count))\n \n order = OrderModel(items = items, status=\"pending\")\n order.save_to_db()\n\n order.set_status(\"failed\") # assume the order would fail until it's completed\n #order.charge_with_stripe(data[\"token\"])\n order.set_status(\"complete\") # charge succeeded\n\n return order_schema.dump(order), 200", "def test_get_orders(self):\n pass", "def test_retrieve_all_orders(self):\n response = self.api_test_client.get('{}/orders'.format(self.BASE_URL))\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response_as_json(\n response)['orders'][0]['item_name'], self.ORDER['item_name'])\n self.assertEqual(response_as_json(\n response)['orders'][1]['item_name'], self.ORDER_2['item_name'])\n self.assertEqual(len(response_as_json(response)['orders']), 2)", "def processOrders(self, printOutput=False):\n orderData = self.trader.tradeData.get('orders',None)\n if orderData.get('success') == 0: #order data contains failed api call\n logging.error('Success=0: orderData: %s' % orderData)\n orderData = self.trader.tapi.getOrders()\n if printOutput:\n try:\n for key in orderData.get('return').keys():\n order = orderData.get('return')[key]\n print('ID: %s %s %s %s at %s' %(key,\n order['pair'],\n order['type'],\n order['amount'],\n order['rate']))\n except TypeError as e:\n # TODO add debug flag for printing output to console on errors\n print'TypeError in processOrders:'\n print e\n logging.error('Type error in 
helper.processOrders: %s' % e)\n logging.info('orderData: %s' % orderData)\n except KeyError as e:\n print'KeyError in processOrders'\n print e\n logging.error('Key error in helper.processOrders: %s' % e)\n logging.info('orderData: %s' % orderData)\n return orderData", "async def fetch_open_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n await self.load_markets()\n request = {}\n market = None\n response = None\n if symbol is None:\n response = await self.privatePostAuthROrders(self.extend(request, params))\n else:\n market = self.market(symbol)\n request['symbol'] = market['id']\n response = await self.privatePostAuthROrdersSymbol(self.extend(request, params))\n #\n # [\n # [\n # 95408916206, # Order ID\n # null, # Group Order ID\n # 1653322349926, # Client Order ID\n # \"tDOGE:UST\", # Market ID\n # 1653322349926, # Created Timestamp in milliseconds\n # 1653322349927, # Updated Timestamp in milliseconds\n # -10, # Amount remaining(Positive means buy, negative means sell)\n # -10, # Original amount\n # \"EXCHANGE LIMIT\", # Order type\n # null, # Previous Order Type\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # 0, # Flags, see parseOrderFlags()\n # \"ACTIVE\", # Order Status, see parseOrderStatus()\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # 0.11, # Price\n # 0, # Average Price\n # 0, # Trailing Price\n # 0, # Auxiliary Limit price(for STOP LIMIT)\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # 0, # Hidden(0 if False, 1 if True)\n # 0, # Placed ID(If another order caused self order to be placed(OCO) self will be that other order's ID)\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # \"API>BFX\", # Routing, indicates origin of action: BFX, ETHFX, API>BFX, API>ETHFX\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # {\"$F7\":1} # additional meta information about the order( $F7 = IS_POST_ONLY(0 if False, 1 if True), $F33 = Leverage(int))\n # ],\n # ]\n #\n return self.parse_orders(response, market, since, limit)", "def _handle_orders(self, response):\n response_type = response['type']\n state_updated = False\n if response_type == \"subscription_ack\":\n # Insure the subscription details are expected. 
Don't do anything.\n account_id = response['accountId']\n # TODO: should we do anything with the subscription id?\n # subscription_id = response['subscriptionId']\n symbol_filter = response['symbolFilter']\n api_session_filter = response['apiSessionFilter']\n event_type_filter = response['eventTypeFilter']\n if len(symbol_filter) or len(event_type_filter):\n raise Exception(\"No symbol or event type were specified, but \"\n \"filters were registered.\")\n if len(api_session_filter) != 1:\n raise Exception(\"1 session filter should have been registered.\"\n f\"{len(api_session_filter)} were registered.\")\n accepted_key = api_session_filter[0]\n if accepted_key != self._api_credentials\\\n .api_key:\n raise Exception(\"The whitelisted api session key does not \"\n \"match our session key.\")\n elif response_type == \"initial\":\n # Create a new order record for the initial response.\n order_response = OrderResponse.from_json_dict(response)\n new_order = exchanges.Order()\n order_response.update_order(new_order)\n existing_order = self.exchange_state.order(new_order.order_id)\n if existing_order:\n raise Exception(\"An initial response was received for an \"\n \"existing order (id: {new_order.order_id}).\")\n self.exchange_state.set_order(new_order.order_id, new_order)\n state_updated = True\n elif response_type == \"accepted\":\n # Create a new order. Mark the corresponding action as successful.\n order_response = OrderResponse.from_json_dict(response)\n new_order = exchanges.Order()\n order_response.update_order(new_order)\n self.exchange_state.set_order(new_order.order_id, new_order)\n found_action = False\n for a in self._create_actions:\n if id(a) == int(order_response.client_order_id):\n if a.order is not None:\n raise Exception(\"An order accept message was received, \"\n \"but its corresponding action already \"\n \"has an order (id:{a.order.order_id}).\")\n a.order = new_order\n # I don't know if we need this status.\n a.status = exchanges.Action.Status.SUCCESS\n found_action = True\n break\n if not found_action:\n raise Exception(\"Received an order accept message, but no \"\n \"matching order action was found.\")\n state_updated = True\n elif response_type == \"rejected\":\n order_response = OrderResponse.from_json_dict(response)\n log.warning(f\"An order was rejected. Reason: \" + response['reason'])\n new_order = exchanges.Order()\n order_response.update_order(new_order)\n self.exchange_state.set_order(new_order.order_id, new_order)\n found_action = False\n for a in self._create_actions:\n if id(a) == int(order_response.client_order_id):\n if a.order is not None:\n raise Exception(\"An order reject message was received, \"\n \"but its corresponding action already \"\n \"has an order (id:{a.order.order_id}).\")\n a.order = new_order\n a.status = exchanges.Action.Status.FAILED\n found_action = True\n break\n if not found_action:\n raise Exception(\"Received an order reject message, but no \"\n \"matching order action was found.\")\n state_updated = True\n elif response_type == \"booked\":\n # I don't think we need to act on this.\n log.info(\"Order booked. 
Order id:{response['order_id']}.\")\n elif response_type == \"fill\":\n order_response = OrderResponse.from_json_dict(response)\n order = self.exchange_state.order(order_response.order_id)\n if not order:\n raise Exception(\"Received a fill response for an unknown order \"\n f\"(id:{order_response.order_id}).\")\n log.info(\"Order fill response received for order id: \"\n f\"{order_response.order_id}.\")\n order_response.update_order(order)\n state_updated = True\n # TODO: we could add some checks here to see if our fee calculation\n # is correct.\n elif response_type == \"cancelled\":\n order_response = OrderResponse.from_json_dict(response)\n order = self.exchange_state.order(order_response.order_id)\n reason = response.get('reason', 'No reason provided.')\n # Unused:\n # cancel_command_id = response.get('cancel_command_id', None)\n if not order:\n raise Exception(\"Received a cancelled response for an unknown \"\n f\"order (id:{order_response.order_id}). Reason:\"\n f\"{reason}\")\n log.info(\"Order fill response received for order id: \"\n f\"{order_response.order_id}. Reason: {reason}\")\n cancel_action = self._cancel_actions.get(order_response.order_id,\n None)\n if not cancel_action:\n raise Exception(\"Received a cancel response but can't find a \"\n \"matching cancel action.\")\n cancel_action.status = exchanges.Action.Status.SUCCESS\n state_updated = True\n elif response_type == \"cancel_rejected\":\n order_response = OrderResponse.from_json_dict(response)\n reason = response.get('reason', 'No reason provided.')\n log.warning(\"Failed to cancel order (id: \"\n f\"{order_response.order_id}). Reason: {reason}\")\n cancel_action = self._cancel_actions.get(order_response.order_id,\n None)\n if not cancel_action:\n raise Exception(\"Received a cancel rejected response but can't \"\n \"find a matching cancel action.\")\n cancel_action.status = exchanges.Action.Status.FAILED\n state_updated = True\n elif response_type == \"closed\":\n order_response = OrderResponse.from_json_dict(response)\n order = self.exchange_state.order(order_response.order_id)\n if not order:\n raise Exception(\"Received a close response for an unknown order\"\n f\" (id:{order_response.order_id}).\")\n log.info(\"Order close response received for order id: \"\n f\"{order_response.order_id}.\")\n order_response.update_order(order)\n state_updated = True\n else:\n raise Exception(f\"Unexpected response type: {response_type}.\")\n return state_updated", "def test91_GetFilledOrders(self):\n payload = PAYLOAD()\n payload['method'] = 'get_filled_orders'\n res = requests.post( url, data=json.dumps(payload), headers=headers).json()['result']\n self.assertEqual(len(res), 3)\n self.assertEqual(res[0]['receiving_address'], 'mrUedhEhZzbmdSbmd41CxoTZuTVgrwdL7p')\n self.assertEqual(res[0]['order_id'], 'DUMMY_ORD_2')\n sorted(res, key = lambda x: x['created_at'])\n payload['params']['timestamp'] = res[0]['created_at'] + 1\n res = requests.post( url, data=json.dumps(payload), headers=headers).json()['result']\n self.assertEqual(len(res), 2)", "def test_get_order_list(self):\n resp = self.app.get('/orders')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n self.assertEqual(len(data), 2)", "def orders(self):\n big = BigCommerceAPI()\n response = big.get('orders')\n return response.text", "def do_orders(self,args):\n try:\n orders = bitstamp.open_orders()\n orders = sorted(orders, key=lambda x: float(x['price']))\n buytotal,selltotal = 0,0\n numbuys,numsells = 0,0\n amtbuys,amtsells = 0,0\n 
buyavg,sellavg = 0,0\n numorder = 0 \n for order in orders:\n ordertype=\"Sell\" if order['type'] == 1 else \"Buy\"\n numorder += 1\n print '%s = %s | $%s @ %s BTC %s' % (numorder,ordertype,order['price'],order['amount'],order['id']) \n if order['type'] == 0:\n buytotal += D(order['price'])*D(order['amount'])\n numbuys += D('1')\n amtbuys += D(order['amount'])\n elif order['type'] == 1:\n selltotal += D(order['price'])*D(order['amount'])\n numsells += D('1')\n amtsells += D(order['amount'])\n if amtbuys:\n buyavg = D(buytotal/amtbuys).quantize(cPrec)\n if amtsells:\n sellavg = D(selltotal/amtsells).quantize(cPrec)\n print \"There are %s Buys. There are %s Sells\" % (numbuys,numsells)\n print \"Avg Buy Price: $%s. Avg Sell Price: $%s\" % (buyavg,sellavg)\n except Exception as e:\n print e", "async def handle_new_order_response(self, response: RequesterResponse\n ) -> HitbtcOrderModel:", "def get_all_orders_count(): \n data = order_obj.get_all_orders(\"1\")\n return data", "def received(self, page=None, per_page=None, sort_order=None):\r\n url = '{0}/{1}'.format(self.get_url(), 'received')\r\n params = base.get_params(('page', 'per_page', 'sort_order'), locals())\r\n\r\n return http.Request('GET', url, params), parsers.parse_json", "def test_order_found(self):\n with mock_order_endpoint(order_number=self.ORDER_NUMBER, response=self.MOCK_ORDER):\n response = self.client.get(self.path)\n\n assert response.status_code == 200\n actual = json.loads(response.content.decode('utf-8'))\n assert actual == self.MOCK_ORDER", "async def fetch_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n await self.load_markets()\n market = None\n request = {}\n if symbol is not None:\n market = self.market(symbol)\n request['symbol'] = market['id']\n if since is not None:\n request['startTime'] = self.iso8601(since)\n if limit is not None:\n request['count'] = limit\n request = self.deep_extend(request, params)\n # why the hassle? urlencode in python is kinda broken for nested dicts.\n # E.g. self.urlencode({\"filter\": {\"open\": True}}) will return \"filter={'open':+True}\"\n # Bitmex doesn't like that. 
Hence resorting to self hack.\n if 'filter' in request:\n request['filter'] = self.json(request['filter'])\n response = await self.privateGetOrder(request)\n return self.parse_orders(response, market, since, limit)", "def delivery_page(cls, logger=None):\n if logger is None:\n logger = cls._logger\n\n database_connection = DatabaseConnection(f\"orders.csv\")\n view = database_connection.get_view()\n logger.log(view)\n\n while True:\n\n choice = input(\n \"Please choose: \"\n \"(1) refresh orders view, \"\n \"(2) next page, \"\n \"(3) previous page, \"\n \"(4) examine order, \"\n \"Enter empty to go back \"\n )\n if choice not in ('1', '2', '3', '4'):\n break\n\n if choice=='1':\n view = database_connection.get_view()\n logger.log(view)\n\n # next page\n elif choice=='2': \n database_connection.next_page()\n view = database_connection.get_view()\n logger.log(view)\n\n # previous page\n elif choice=='3':\n database_connection.prev_page()\n view = database_connection.get_view()\n logger.log(view)\n\n elif choice=='4':\n\n # get product_id\n while True:\n order_id = input(\"Enter the order id: \")\n try:\n order_id = int(order_id)\n except:\n logger.log(\"order id should be an integer\")\n break\n\n table = database_connection.table\n order = table.loc[(table['order_id']==order_id), \"order\"][0] # order_id should be unique\n logger.log(json.dumps(json.loads(order), indent=1)) # pretty logger.log the json\n\n\n else:\n break", "def by_page(self) -> global___Snippet.PaginatedResponseHandling.ByPage:", "def by_page(self) -> global___Snippet.PaginatedResponseHandling.ByPage:", "def assert_response_orders(self, *args, **kwargs):\n self.assert_response_order(*args, **kwargs)\n kwargs['order_by'] = '-' + kwargs['order_by']\n self.assert_response_order(*args, **kwargs)" ]
[ "0.68570167", "0.6552971", "0.61255354", "0.60908", "0.60400605", "0.5982922", "0.584163", "0.58357054", "0.5809135", "0.56956005", "0.56678724", "0.565327", "0.5648753", "0.5642741", "0.56354815", "0.5604037", "0.556626", "0.5559942", "0.5542466", "0.5541483", "0.55298144", "0.55151176", "0.5478685", "0.5472452", "0.5438841", "0.54053557", "0.539967", "0.5395816", "0.5395816", "0.53723973" ]
0.68449026
1
Add the orders from the response to an Orders request to self.orders.
def add_orders(self, response_data):
    orders = response_data[self.DATA][self.DATA]
    for order in orders:
        self.orders.append(self.process_order_data(order))
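The positive document above assumes a surrounding class that defines self.DATA, self.orders, and process_order_data; none of those appear in the record. A minimal runnable sketch that fills those gaps with assumed names, only to make the accumulation behavior concrete:

class OrdersClient:
    DATA = "data"  # assumed key name; the record only shows response_data[self.DATA][self.DATA]

    def __init__(self):
        self.orders = []

    def process_order_data(self, order):
        # Hypothetical normalization hook; the real implementation is not shown in the record.
        return order

    def add_orders(self, response_data):
        orders = response_data[self.DATA][self.DATA]
        for order in orders:
            self.orders.append(self.process_order_data(order))


client = OrdersClient()
client.add_orders({"data": {"data": [{"id": 1}, {"id": 2}]}})
assert len(client.orders) == 2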
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _handle_orders(self, response):\n response_type = response['type']\n state_updated = False\n if response_type == \"subscription_ack\":\n # Insure the subscription details are expected. Don't do anything.\n account_id = response['accountId']\n # TODO: should we do anything with the subscription id?\n # subscription_id = response['subscriptionId']\n symbol_filter = response['symbolFilter']\n api_session_filter = response['apiSessionFilter']\n event_type_filter = response['eventTypeFilter']\n if len(symbol_filter) or len(event_type_filter):\n raise Exception(\"No symbol or event type were specified, but \"\n \"filters were registered.\")\n if len(api_session_filter) != 1:\n raise Exception(\"1 session filter should have been registered.\"\n f\"{len(api_session_filter)} were registered.\")\n accepted_key = api_session_filter[0]\n if accepted_key != self._api_credentials\\\n .api_key:\n raise Exception(\"The whitelisted api session key does not \"\n \"match our session key.\")\n elif response_type == \"initial\":\n # Create a new order record for the initial response.\n order_response = OrderResponse.from_json_dict(response)\n new_order = exchanges.Order()\n order_response.update_order(new_order)\n existing_order = self.exchange_state.order(new_order.order_id)\n if existing_order:\n raise Exception(\"An initial response was received for an \"\n \"existing order (id: {new_order.order_id}).\")\n self.exchange_state.set_order(new_order.order_id, new_order)\n state_updated = True\n elif response_type == \"accepted\":\n # Create a new order. Mark the corresponding action as successful.\n order_response = OrderResponse.from_json_dict(response)\n new_order = exchanges.Order()\n order_response.update_order(new_order)\n self.exchange_state.set_order(new_order.order_id, new_order)\n found_action = False\n for a in self._create_actions:\n if id(a) == int(order_response.client_order_id):\n if a.order is not None:\n raise Exception(\"An order accept message was received, \"\n \"but its corresponding action already \"\n \"has an order (id:{a.order.order_id}).\")\n a.order = new_order\n # I don't know if we need this status.\n a.status = exchanges.Action.Status.SUCCESS\n found_action = True\n break\n if not found_action:\n raise Exception(\"Received an order accept message, but no \"\n \"matching order action was found.\")\n state_updated = True\n elif response_type == \"rejected\":\n order_response = OrderResponse.from_json_dict(response)\n log.warning(f\"An order was rejected. Reason: \" + response['reason'])\n new_order = exchanges.Order()\n order_response.update_order(new_order)\n self.exchange_state.set_order(new_order.order_id, new_order)\n found_action = False\n for a in self._create_actions:\n if id(a) == int(order_response.client_order_id):\n if a.order is not None:\n raise Exception(\"An order reject message was received, \"\n \"but its corresponding action already \"\n \"has an order (id:{a.order.order_id}).\")\n a.order = new_order\n a.status = exchanges.Action.Status.FAILED\n found_action = True\n break\n if not found_action:\n raise Exception(\"Received an order reject message, but no \"\n \"matching order action was found.\")\n state_updated = True\n elif response_type == \"booked\":\n # I don't think we need to act on this.\n log.info(\"Order booked. 
Order id:{response['order_id']}.\")\n elif response_type == \"fill\":\n order_response = OrderResponse.from_json_dict(response)\n order = self.exchange_state.order(order_response.order_id)\n if not order:\n raise Exception(\"Received a fill response for an unknown order \"\n f\"(id:{order_response.order_id}).\")\n log.info(\"Order fill response received for order id: \"\n f\"{order_response.order_id}.\")\n order_response.update_order(order)\n state_updated = True\n # TODO: we could add some checks here to see if our fee calculation\n # is correct.\n elif response_type == \"cancelled\":\n order_response = OrderResponse.from_json_dict(response)\n order = self.exchange_state.order(order_response.order_id)\n reason = response.get('reason', 'No reason provided.')\n # Unused:\n # cancel_command_id = response.get('cancel_command_id', None)\n if not order:\n raise Exception(\"Received a cancelled response for an unknown \"\n f\"order (id:{order_response.order_id}). Reason:\"\n f\"{reason}\")\n log.info(\"Order fill response received for order id: \"\n f\"{order_response.order_id}. Reason: {reason}\")\n cancel_action = self._cancel_actions.get(order_response.order_id,\n None)\n if not cancel_action:\n raise Exception(\"Received a cancel response but can't find a \"\n \"matching cancel action.\")\n cancel_action.status = exchanges.Action.Status.SUCCESS\n state_updated = True\n elif response_type == \"cancel_rejected\":\n order_response = OrderResponse.from_json_dict(response)\n reason = response.get('reason', 'No reason provided.')\n log.warning(\"Failed to cancel order (id: \"\n f\"{order_response.order_id}). Reason: {reason}\")\n cancel_action = self._cancel_actions.get(order_response.order_id,\n None)\n if not cancel_action:\n raise Exception(\"Received a cancel rejected response but can't \"\n \"find a matching cancel action.\")\n cancel_action.status = exchanges.Action.Status.FAILED\n state_updated = True\n elif response_type == \"closed\":\n order_response = OrderResponse.from_json_dict(response)\n order = self.exchange_state.order(order_response.order_id)\n if not order:\n raise Exception(\"Received a close response for an unknown order\"\n f\" (id:{order_response.order_id}).\")\n log.info(\"Order close response received for order id: \"\n f\"{order_response.order_id}.\")\n order_response.update_order(order)\n state_updated = True\n else:\n raise Exception(f\"Unexpected response type: {response_type}.\")\n return state_updated", "async def handle_new_order_response(self, response: RequesterResponse\n ) -> HitbtcOrderModel:", "def orders(self, orders):\n\n self._orders = orders", "def orders(self, orders):\n\n self._orders = orders", "async def handle_get_active_orders_response(self, response: RequesterResponse\n ) -> HitbtcOrders:", "def add_order(self, orders):\n if isinstance(orders, list):\n for order in orders:\n self._add_order(order)\n else:\n self._add_order(orders)", "def request_orders(self):\r\n if self.use_http():\r\n self.enqueue_http_request(\"money/orders\", {}, \"orders\")\r\n else:\r\n self.send_signed_call(\"private/orders\", {}, \"orders\")", "def received_orders(self, received_orders):\n\n self._received_orders = received_orders", "def assert_response_orders(self, *args, **kwargs):\n self.assert_response_order(*args, **kwargs)\n kwargs['order_by'] = '-' + kwargs['order_by']\n self.assert_response_order(*args, **kwargs)", "async def handle_cancel_orders_response(self, response: RequesterResponse\n ) -> HitbtcOrders:", "async def on_orders_replaced(self, orders: 
List[MetatraderOrder]):\n self._orders = orders", "def extract ( self, response ):\n\t\tresponse = response.json()['response']\n\t\traworders = response['orderstatus']['order']\n\n\t\tif not isinstance(raworders, list):\n\t\t\traworders = [raworders]\n\n\t\torders = [ Order(fixml=x['fixmlmessage']) for x in raworders]\n\n\t\treturn orders", "def get_orders(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), params={**kwargs})", "def add_responses(self, response):\n self.responses = self.responses.union(set(response) if type(response) is not set else response)\n # return Post(self.title, self.timestamp, self.subject, self.content, self.resto,\n # self.responses.union(set(response) if type(response) is not set else response))", "def make_order_request(self, page):\n return api_methods.Orders(\n page=page,\n per_page=self.PER_PAGE,\n from_date=self.from_date,\n start_date=self.start_date,\n end_date=self.end_date,\n deal_id=self.deal_id,\n ).call()", "def get_orders(self, *orders):\n return Orders(self, orders)", "def ingest_results(self, results):\n \n if isinstance(results, dict):\n if 'items' in results.keys():\n results = results['items']\n \n for idx, r in enumerate(results):\n \n # First get the image from the ImageList\n record_id = r['recordId']\n image = None\n if self.img_lst is not None:\n image = self.img_lst.get_image(record_id)\n image.set_metadata('Yes', 'orderSubmitted')\n \n # Create the OrderItem\n order_item = OrderItem(self.eod)\n if image is not None:\n order_item.add_image(image)\n order_item.parse_record(r)\n \n # Update or create Order\n order_id = order_item.get_orderId()\n order = self.get_order(order_id)\n if order is None:\n order = Order(order_id)\n order.add_item(order_item)\n self.order_lst.append(order)\n else:\n order.add_item(order_item)\n \n if image is not None:\n img_mdata = image.get_metadata()\n image.set_metadata(order_id, 'orderId')\n image.set_metadata(r.get('status'), 'orderStatus')\n image.set_metadata(r.get('statusMessage'), 'statusMessage')\n image.set_metadata(r.get('dateRapiOrdered'), \\\n 'dateRapiOrdered')", "async def handle_get_active_order_response(self, response: RequesterResponse\n ) -> HitbtcOrderModel:", "async def on_order_updated(self, order: MetatraderOrder):\n for i in range(len(self._orders)):\n if self._orders[i]['id'] == order['id']:\n self._orders[i] = order\n break\n else:\n self._orders.append(order)", "def place_order(self, order_event):\n self._check_day_data(order_event.order_time)\n if order_event.order_type == 'MARKET':\n self._fill_market_order(order_event)\n elif order_event.order_type == 'LIMIT':\n if self._check_limit_order(order_event, order_event.order_time):\n pass\n self.resting_orders.append(order_event)", "async def place_order(request: web.Request, body) -> web.Response:\n body = Order.from_dict(body)\n return web.Response(status=200)", "def get_orders():\n\n\t# Get the email from the user making the request\n\temail = get_jwt_identity()\n\n\t# Checks if the reader exists in the database\n\treader = Reader.query.filter_by(email=email).first()\n\tif not reader:\n\t\treturn bad_request(\"Reader does not exist.\")\n\n\t# Gets a list of all the users requested rooms\n\troom_relation = RoomRequest.query \\\n\t\t.filter_by(reader_id=reader.id) \\\n\t\t.join(Room, Room.id == RoomRequest.room_id) \\\n\t\t.join(ApprovesRoomRequest, ApprovesRoomRequest.room_request_id == RoomRequest.id) \\\n\t\t.join(Reader, Reader.id == ApprovesRoomRequest.approver_id) \\\n\t\t.all()\n\troom_orders = 
[\n\t\t{\"room_id\": x.room_id, \"name\": x.room.name.capitalize(), \"approver\": x.request_approver.approver.email,\n\t\t\t\"date\": x.datetime_requested, \"type\": \"Room\"} for x in room_relation]\n\n\t# Gets a list of all the users requested access groups\n\tag_relation = AccessGroupRequest.query \\\n\t\t.filter_by(reader_id=reader.id) \\\n\t\t.join(AccessGroup, AccessGroup.id == AccessGroupRequest.ag_id) \\\n\t\t.join(ApprovesAgRequest, ApprovesAgRequest.ag_request_id == AccessGroupRequest.id) \\\n\t\t.join(Reader, Reader.id == ApprovesAgRequest.approver_id) \\\n\t\t.all()\n\tag_orders = [\n\t\t{\"ag_id\": x.ag_id, \"name\": x.ag.name.capitalize(), \"approver\": x.request_approver.approver.email,\n\t\t\"date\": x.datetime_requested, \"type\": \"Access group\"} for x in ag_relation\n\t]\n\n\treturn ok({\"orders\": room_orders + ag_orders})", "def _place_orders_onto_queue(self, order_list: List[OrderEvent]):\n for order_event in order_list:\n self._events.add_event(order_event)", "def get_order(self):\n #store the orders for the current cycle inside the class\n self.orders = self.firebase.get_data(\"orders\")", "def post(cls):\n data = request.get_json() # token ,item_ids [1, 3, 3, 5, 5, 5]\n items = []\n item_id_quantities = Counter(data[\"item_ids\"])\n\n for _id, count in item_id_quantities.most_common(): # [(5,3),(3,2),(1,1)]\n item = ItemModel.find_by_id(_id)\n if not item:\n return {\"message\": gettext(\"order_item_by_id_not_found\").format(_id)}, 404\n\n \"\"\"ItemsInOrder get item_id and quantity, however\n order_id will be set later on,\n when items is passed into OrderModel, because back_populates=\"order\"\n it goes over to order column of ItemsInOrder table,\n and set order_id for each of those item in OrderModel\n to be the order to which you have added those items\"\"\"\n items.append(ItemsInOrder(item_id=_id, quantity=count))\n\n # items is a list of ItemsInOrder obj\n order = OrderModel(items=items, status=\"pending\") # pending until send to Stripe\n order.save_to_db() # this does not submit to Stripe\n\n try:\n order.set_status(\"failed\") # assume the order would fail until it's completed\n order.charge_with_stripe(data[\"token\"])\n order.set_status(\"complete\") # charge succeeded\n return order_schema.dump(order), 200\n # the following error handling is advised by Stripe, although the handling implementations are identical,\n # we choose to specify them separately just to give the students a better idea what we can expect\n except error.CardError as e:\n # Since it's a decline, stripe.error.CardError will be caught\n return e.json_body, e.http_status\n except error.RateLimitError as e:\n # Too many requests made to the API too quickly\n return e.json_body, e.http_status\n except error.InvalidRequestError as e:\n # Invalid parameters were supplied to Stripe's API\n return e.json_body, e.http_status\n except error.AuthenticationError as e:\n # Authentication with Stripe's API failed\n # (maybe you changed API keys recently)\n return e.json_body, e.http_status\n except error.APIConnectionError as e:\n # Network communication with Stripe failed\n return e.json_body, e.http_status\n except error.StripeError as e:\n # Display a very generic error to the user, and maybe send\n # yourself an email\n return e.json_body, e.http_status\n except Exception as e:\n # Something else happened, completely unrelated to Stripe\n print(e)\n return {\"message\": gettext(\"order_error\")}, 500", "def post(cls):\n data = request.get_json() # token + list of item ids [1, 2, 3, 5, 5, 5]\n 
items = []\n item_id_quantities = Counter(data[\"item_ids\"])\n\n # Iterate over items and retrieve them from the database\n for _id, _count in item_id_quantities.most_common():\n item = ItemModel.find_by_id(_id)\n if not item:\n return {\"message\": gettext(\"order_item_by_id_not_found\").format(_id)}, 404\n \n items.append(ItemInOrder(item_id=_id, quantity=_count))\n \n order = OrderModel(items = items, status=\"pending\")\n order.save_to_db()\n\n order.set_status(\"failed\") # assume the order would fail until it's completed\n #order.charge_with_stripe(data[\"token\"])\n order.set_status(\"complete\") # charge succeeded\n\n return order_schema.dump(order), 200", "def send_orders_created(order_ids):\n ids = [{\"id\": i} for i in order_ids]\n return make_response(jsonify({\"orders\": ids}), 201)", "def orderInfo(self, orderInfo):\r\n\r\n self._orderInfo = orderInfo", "def get_orders(self):\n return self.order_lst", "def get_orders_list(\n self\n ) -> list:\n\n response = self.session.get(\"http://automationpractice.com/index.php?controller=history\")\n\n self.HTMLParser.set_html(response.text)\n\n tbody = self.HTMLParser.find_elements_by_xpath(\"//tbody/tr\")\n\n if not len(tbody):\n raise NoOrderError()\n\n orders = list()\n\n for tr in tbody:\n\n tr = self.HTMLParser.convert_node(tr)\n tds = tr.xpath(\"//td\")\n\n orders.append({\n \"reference\": self._find_reference(tds[0]),\n \"date\": tds[1].text_content().strip(),\n \"value\": tds[2].get(\"data-value\"),\n \"payment_method\": tds[3].text_content(),\n \"status\": self._find_status(tds[4]),\n \"invoice_link\": self._find_invoice_link(tds[5]),\n \"id_order\": self._find_id(tds[5])\n })\n\n return orders" ]
[ "0.69409347", "0.6667287", "0.6545564", "0.6545564", "0.6460046", "0.63917613", "0.6235419", "0.61754966", "0.6097969", "0.60590065", "0.6050112", "0.6049591", "0.60148126", "0.5854137", "0.5701919", "0.5696883", "0.5695405", "0.56953824", "0.56910616", "0.56895924", "0.56578636", "0.5620063", "0.5560786", "0.5502911", "0.54868954", "0.54864234", "0.54421884", "0.54113144", "0.54099673", "0.53712964" ]
0.8209479
0
Return the response to an Orders request for a page of orders.
def make_order_request(self, page):
    return api_methods.Orders(
        page=page,
        per_page=self.PER_PAGE,
        from_date=self.from_date,
        start_date=self.start_date,
        end_date=self.end_date,
        deal_id=self.deal_id,
    ).call()
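A sketch of how this per-page request might be driven together with add_orders from the previous record; the 1-based page numbering and the total_pages argument are assumptions for illustration, not part of the record:

def fetch_all_orders(client, total_pages):
    # Hypothetical driver pairing make_order_request with add_orders;
    # pages are assumed to run from 1 to total_pages inclusive.
    for page in range(1, total_pages + 1):
        response = client.make_order_request(page)
        client.add_orders(response)
    return client.orders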
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_orders(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), params={**kwargs})", "def request_orders(self):\r\n if self.use_http():\r\n self.enqueue_http_request(\"money/orders\", {}, \"orders\")\r\n else:\r\n self.send_signed_call(\"private/orders\", {}, \"orders\")", "def list_orders(\n page: int = 1,\n limit: int = 15,\n duration: int = 180,\n current_user: CurrentUser = Depends(AuthService.verify_auth_access_token),\n):\n past_date = datetime.today().date() - timedelta(days=duration)\n orders = Order.objects.filter(\n is_active=True,\n order_session__user_id=current_user.user_id,\n created_at__gt=past_date,\n ).order_by(\"-created_at\")\n orders = orders[(page - 1) * limit : (page - 1) * limit + limit]\n orders = parse_obj_as(List[OrderResponse], list(orders))\n return JSONResponse(\n content=[order.simple_dict() for order in orders],\n status_code=status.HTTP_200_OK,\n )", "def orders(self):\n big = BigCommerceAPI()\n response = big.get('orders')\n return response.text", "async def handle_get_active_orders_response(self, response: RequesterResponse\n ) -> HitbtcOrders:", "def orders ( self, block: bool = True ):\n\tresult = OutstandingOrders(\n\t\tauth\t\t= self.auth,\n\t\taccount_nbr = self.account_nbr,\n\t\tblock\t\t= block\n\t).request()\n\n\treturn result", "def by_page(self) -> global___Snippet.PaginatedResponseHandling.ByPage:", "def by_page(self) -> global___Snippet.PaginatedResponseHandling.ByPage:", "def retrieve(self, **kwargs):\n return self.client.execute(\"order\", \"GET\", kwargs)", "def get_all_orders():\n response = requests.get(\n settings.SHOPIFY_ORDERS_URL,\n auth=(settings.SHOPIFY_API_KEY, settings.SHOPIFY_PASSWORD),\n )\n return response.json()[\"orders\"]", "def test_retrieve_all_orders(self):\n response = self.api_test_client.get('{}/orders'.format(self.BASE_URL))\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response_as_json(\n response)['orders'][0]['item_name'], self.ORDER['item_name'])\n self.assertEqual(response_as_json(\n response)['orders'][1]['item_name'], self.ORDER_2['item_name'])\n self.assertEqual(len(response_as_json(response)['orders']), 2)", "def get_next_page(self, raw=False):\n return self.account.get_orders(page=self.current_page + 1, raw=raw)", "def get_all_orders():", "def list(self, request):\n orders = Order.objects.all()\n\n customer = self.request.query_params.get('customer_id', None)\n complete = self.request.query_params.get('complete', None)\n payment = self.request.query_params.get('payment_id', None)\n if customer is not None:\n if complete == \"0\":\n orders = orders.filter(customer__id=customer, payment_type__id__isnull=True)\n if complete == \"1\":\n orders = orders.filter(customer__id=customer, payment_type__id__isnull=False)\n\n if payment is not None:\n orders = orders.filter(payment_type__id=payment)\n if complete is not None:\n print(\"EEEEEEEEEEEEEEEEEEEEEEEEEEEE\")\n if complete == \"1\":\n orders = orders.filter(payment_type__id__isnull=False)\n elif complete == \"0\":\n orders = orders.filter(payment_type__id__isnull=True)\n\n serializer = OrderSerializer(\n orders, many=True, context={'request': request})\n return Response(serializer.data)", "def received(self, page=None, per_page=None, sort_order=None):\r\n url = '{0}/{1}'.format(self.get_url(), 'received')\r\n params = base.get_params(('page', 'per_page', 'sort_order'), locals())\r\n\r\n return http.Request('GET', url, params), parsers.parse_json", "def get_all_orders(): \n data = order_obj.get_all_orders()\n return 
data", "def list(self, limit=10, offset=0):\n LOG.debug('Listing orders - offset {0} limit {1}'.format(offset,\n limit))\n href = '{0}/{1}'.format(self.api.base_url, self.entity)\n params = {'limit': limit, 'offset': offset}\n resp = self.api.get(href, params)\n\n return [Order(o) for o in resp['orders']]", "def query_orders(self):\n return self._call_txtrader_api('query_orders', {})", "def get(self):\n orders = db.session.query(models.Order)\n args = order_query_parser.parse_args()\n order_id = args['order_id']\n if order_id is not None:\n orders = orders.filter_by(id=order_id)\n copy = args['copy_id']\n if copy is not None:\n orders = orders.filter_by(copy=copy)\n borrower = args['borrower']\n if borrower is not None:\n orders = orders.filter_by(borrower=borrower)\n\n copy_owner = args['copy_owner']\n if copy_owner is not None:\n orders = orders.filter_by(copy_owner=copy_owner)\n\n status = args['order_status']\n if status is not None:\n orders = orders.filter_by(status=status)\n date = args['return_date']\n if date is not None:\n orders = orders.filter_by(expire=date)\n if id is None and copy is None and borrower is None and copy_owner is None and status is None:\n return 'Please provide searching parameters', 400\n\n return [order.serialize() for order in orders], 200", "def get(self):\n\n from advertise import Orders, Advert\n\n URL = self.request.url\n strURLlist = URL.split(\"/\")\n strDepositReference = strURLlist[len(strURLlist) - 1]\n\n # The Actual Order requested\n findRequest = Orders.query(Orders.deposit_reference == strDepositReference)\n thisOrderList = findRequest.fetch()\n\n if len(thisOrderList) > 0:\n thisOrder = thisOrderList[0]\n else:\n thisOrder = Orders()\n\n # Organization details of the owner of the account\n findRequest = Organization.query(Organization.strOrganizationID == thisOrder.organization_id)\n thisOrgList = findRequest.fetch()\n\n if len(thisOrgList) > 0:\n thisOrg = thisOrgList[0]\n else:\n thisOrg = Organization()\n\n # Main Account Details of the owner of the account\n findRequest = Accounts.query(Accounts.uid == thisOrder.uid)\n thisAccountList = findRequest.fetch()\n\n if len(thisAccountList) > 0:\n thisAccount = thisAccountList[0]\n else:\n thisAccount = Accounts()\n\n # The Advert being paid for\n findRequest = Advert.query(Advert.advert_id == thisOrder.advert_id)\n thisAdvertList = findRequest.fetch()\n\n if len(thisAdvertList) > 0:\n thisAdvert = thisAdvertList[0]\n else:\n thisAdvert = Advert()\n\n from advertise import Payments # This is to force the use of payments class in adverts\n # Payment details Advert\n findRequest = Payments.query(Payments.order_id == thisOrder.order_id)\n thisRelatedPaymentList = findRequest.fetch()\n\n # User Organization Payment Details\n findRequest = Payments.query(Payments.organization_id == thisOrder.organization_id)\n thisOrganizationPaymentsList = findRequest.fetch()\n\n template = template_env.get_template('templates/dashboard/payments/AdvertOrders.html')\n context = {'thisOrder': thisOrder, 'thisOrg': thisOrg, 'thisAccount': thisAccount, 'thisAdvert': thisAdvert,\n 'thisRelatedPaymentList': thisRelatedPaymentList,\n 'thisOrganizationPaymentsList': thisOrganizationPaymentsList}\n self.response.write(template.render(context))", "def get_orders(request):\n close_old_connections()\n try:\n # Give all orders maded on the given date.\n return Order.objects.filter(\n date__date=request.GET['date']).order_by('-date')\n except MultiValueDictKeyError:\n # Give all orders today.\n return Order.objects.filter(\n 
date__date=datetime.now().date()).order_by('-date')", "def test_get_order_list(self):\n self._create_orders(5)\n resp = self.app.get('/orders')\n print(resp.data)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(len(data), 5)", "def get_orders(self, oid=None, include_expired=False, orderid=None):\n return self.request(\n 'get',\n '%sorders/%s%s' % (\n safeformat('objects/{:int}/', oid) if oid else \"\",\n \"all/\" if include_expired else \"\",\n safeformat('{:int}', orderid) if orderid else \"\"\n )\n )", "def orders(request):\n return render(request, 'orders/orderList.html')", "def list_order(self, orderNo):\r\n param = {}\r\n param['orderNo'] = orderNo\r\n param['appid'] = self.apiKey\r\n param['nonce'] = int(time.time() * 1000)\r\n param['timestamp'] = int(time.time())\r\n return self.__signed_GET('/api/v1/order', param, self.timeout)", "def get_paginated_response(self, data):\n return Response(\n OrderedDict([\n ('count', self.page.paginator.count),\n ('next', self.get_next_link()),\n ('previous', self.get_previous_link()),\n ('results', data),\n ]))", "def get(self):\n orders = db.session.query(models.Order)\n return [order.serialize() for order in orders], 200", "def test_get_order_list(self):\n resp = self.app.get('/orders')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n self.assertEqual(len(data), 2)", "def get_orders_list(\n self\n ) -> list:\n\n response = self.session.get(\"http://automationpractice.com/index.php?controller=history\")\n\n self.HTMLParser.set_html(response.text)\n\n tbody = self.HTMLParser.find_elements_by_xpath(\"//tbody/tr\")\n\n if not len(tbody):\n raise NoOrderError()\n\n orders = list()\n\n for tr in tbody:\n\n tr = self.HTMLParser.convert_node(tr)\n tds = tr.xpath(\"//td\")\n\n orders.append({\n \"reference\": self._find_reference(tds[0]),\n \"date\": tds[1].text_content().strip(),\n \"value\": tds[2].get(\"data-value\"),\n \"payment_method\": tds[3].text_content(),\n \"status\": self._find_status(tds[4]),\n \"invoice_link\": self._find_invoice_link(tds[5]),\n \"id_order\": self._find_id(tds[5])\n })\n\n return orders", "def test_get_order_list(self):\n customer = Customer.objects.get(first_name=\"Larosh\", last_name=\"Tanbari\")\n orders = PizzaOrder.objects.filter(customer=customer)\n response = self.client.get(\n reverse('order_list',\n kwargs={'customer': orders.custome}),\n format=\"json\"\n )\n serializer = PizzaOrderSerializer(orders, many=True)\n\n self.assertEqual(response.data, serializer.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)" ]
[ "0.6947997", "0.65630955", "0.6448705", "0.63800734", "0.63783544", "0.6324955", "0.6280075", "0.6280075", "0.6201901", "0.6172101", "0.6155729", "0.61465985", "0.60682786", "0.6010717", "0.59935415", "0.5976142", "0.5967135", "0.5932509", "0.59092224", "0.58723015", "0.5858114", "0.5855058", "0.5816937", "0.5799341", "0.5751912", "0.57455647", "0.5722606", "0.57105774", "0.5709737", "0.5697737" ]
0.72716135
0
Test case for tarfile bundling and unbundling
def testTarBundling(self):
    try:
        tP = os.path.join(self.__workPath, "t0.tar.gz")
        dirPath = os.path.join(self.__inpDirPath, "topdir")
        ok = self.__fileU.bundleTarfile(tP, [dirPath], mode="w:gz", recursive=True)
        self.assertTrue(ok)
        numBytes = self.__fileU.size(tP)
        self.assertGreaterEqual(numBytes, 250)
        #
        md5 = self.__fileU.hash(tP, hashType="md5")
        self.assertTrue(md5 is not None)
        #
        ok = self.__fileU.unbundleTarfile(tP, dirPath=self.__workPath)
        self.assertTrue(ok)
        #
        tP = os.path.join(self.__workPath, "t1.tar.gz")
        dirPathList = [os.path.join(self.__inpDirPath, "topdir", "subdirA"), os.path.join(self.__inpDirPath, "topdir", "subdirB")]
        ok = self.__fileU.bundleTarfile(tP, dirPathList, mode="w:gz", recursive=True)
        self.assertTrue(ok)
        #
        ok = self.__fileU.unbundleTarfile(tP, dirPath=self.__workPath)
        self.assertTrue(ok)
        tP = os.path.join(self.__workPath, "t2.tar")
        dirPathList = [os.path.join(self.__inpDirPath, "topdir", "subdirA"), os.path.join(self.__inpDirPath, "topdir", "subdirB")]
        ok = self.__fileU.bundleTarfile(tP, dirPathList, mode="w", recursive=True)
        self.assertTrue(ok)
        #
        ok = self.__fileU.unbundleTarfile(tP, dirPath=self.__workPath)
        self.assertTrue(ok)
    except Exception as e:
        logger.exception("Failing with %s", str(e))
        self.fail()
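The FileUtil helper exercised above is project-specific, but the round trip it tests maps directly onto the standard library; a stdlib-only sketch of the same bundle/unbundle cycle, with all paths illustrative:

import os
import tarfile
import tempfile

with tempfile.TemporaryDirectory() as work:
    src = os.path.join(work, "topdir")
    os.makedirs(src)
    with open(os.path.join(src, "a.txt"), "w") as fh:
        fh.write("payload")

    bundle = os.path.join(work, "t0.tar.gz")
    with tarfile.open(bundle, "w:gz") as tar:   # bundle; tar.add recurses by default
        tar.add(src, arcname="topdir")

    out = os.path.join(work, "unbundled")
    with tarfile.open(bundle, "r:gz") as tar:   # unbundle
        tar.extractall(out)

    assert os.path.isfile(os.path.join(out, "topdir", "a.txt"))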
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_unpack(self):\n if not os.path.isfile(akrr_tar_gz):\n raise Exception(\"Should do test_packager first\")\n \n if os.path.exists(cfg.akrr_home):\n shutil.rmtree(cfg.akrr_home)\n \n if verbosity>=3: print \"\\n\"+\"~\"*80\n \n #start bash shell\n bash = self.getBash()\n \n output=bash.runcmd('tar -xvf {akrr_tar_gz} -C {above_akrr_home}'.format(akrr_tar_gz=akrr_tar_gz,above_akrr_home=os.path.abspath(os.path.join(cfg.akrr_home, \"..\"))),printOutput=True)\n output=bash.runcmd('export AKRR_HOME={akrr_home}'.format(akrr_home=cfg.akrr_home),printOutput=True)\n output=bash.runcmd('cd $AKRR_HOME',printOutput=True)\n output=bash.runcmd('pwd',printOutput=True)\n \n if verbosity>=3: print \"~\"*80\n #test some files presence\n filesToCheck=['src/akrr.py',\n 'src/akrrscheduler.py']\n for f in filesToCheck:\n self.assertEqual(os.path.isfile(os.path.abspath(os.path.join(cfg.akrr_home, f))), True, \"AKRR distribution archive can not be unpacked\")", "def test_tarballs_pre_extracted(self) -> None:\n import hammer_config\n\n tech_dir, tech_dir_base = HammerToolTestHelpers.create_tech_dir(\"dummy28\")\n tech_json_filename = os.path.join(tech_dir, \"dummy28.tech.json\")\n\n # Add defaults to specify tarball_dir.\n with open(os.path.join(tech_dir, \"defaults.json\"), \"w\") as f:\n f.write(json.dumps({\n \"technology.dummy28.tarball_dir\": tech_dir,\n \"vlsi.technology.extracted_tarballs_dir\": tech_dir_base\n }, cls=HammerJSONEncoder))\n\n HammerToolTestHelpers.write_tech_json(tech_json_filename, self.add_tarballs)\n sys.path.append(tech_dir_base)\n tech = self.get_tech(hammer_tech.HammerTechnology.load_from_dir(\"dummy28\", tech_dir))\n tech.cache_dir = tech_dir\n\n database = hammer_config.HammerDatabase()\n database.update_technology(tech.get_config())\n HammerVLSISettings.load_builtins_and_core(database)\n tech.set_database(database)\n outputs = tech.process_library_filter(pre_filts=[], filt=hammer_tech.filters.gds_filter,\n must_exist=False,\n output_func=lambda str, _: [str])\n\n self.assertEqual(outputs, [\"{0}/foobar.tar.gz/test.gds\".format(tech_dir_base)])\n\n # Cleanup\n shutil.rmtree(tech_dir_base)", "def test_unsafe_tar(self):\r\n\r\n def try_tar(tarpath):\r\n with open(tarpath) as tar:\r\n args = {\"name\": tarpath, \"course-data\": [tar]}\r\n resp = self.client.post(self.url, args)\r\n self.assertEquals(resp.status_code, 400)\r\n self.assertTrue(\"SuspiciousFileOperation\" in resp.content)\r\n\r\n try_tar(self._fifo_tar())\r\n try_tar(self._symlink_tar())\r\n try_tar(self._outside_tar())\r\n try_tar(self._outside_tar2())\r\n # Check that `import_status` returns the appropriate stage (i.e.,\r\n # either 3, indicating all previous steps are completed, or 0,\r\n # indicating no upload in progress)\r\n resp_status = self.client.get(\r\n reverse_course_url(\r\n 'import_status_handler',\r\n self.course.id,\r\n kwargs={'filename': os.path.split(self.good_tar)[1]}\r\n )\r\n )\r\n import_status = json.loads(resp_status.content)[\"ImportStatus\"]\r\n self.assertIn(import_status, (0, 3))", "def test_tarballs_not_extracted(self) -> None:\n import hammer_config\n\n tech_dir, tech_dir_base = HammerToolTestHelpers.create_tech_dir(\"dummy28\")\n tech_json_filename = os.path.join(tech_dir, \"dummy28.tech.json\")\n\n # Add defaults to specify tarball_dir.\n with open(os.path.join(tech_dir, \"defaults.json\"), \"w\") as f:\n f.write(json.dumps({\n \"technology.dummy28.tarball_dir\": tech_dir\n }, cls=HammerJSONEncoder))\n\n HammerToolTestHelpers.write_tech_json(tech_json_filename, self.add_tarballs)\n 
sys.path.append(tech_dir_base)\n tech = self.get_tech(hammer_tech.HammerTechnology.load_from_dir(\"dummy28\", tech_dir))\n tech.cache_dir = tech_dir\n\n database = hammer_config.HammerDatabase()\n database.update_technology(tech.get_config())\n HammerVLSISettings.load_builtins_and_core(database)\n tech.set_database(database)\n outputs = tech.process_library_filter(pre_filts=[], filt=hammer_tech.filters.gds_filter,\n must_exist=False,\n output_func=lambda str, _: [str])\n\n self.assertEqual(outputs, [\"{0}/extracted/foobar.tar.gz/test.gds\".format(tech_dir)])\n\n # Cleanup\n shutil.rmtree(tech_dir_base)", "def test_build(self):\r\n self.mkbundle('file1', 'file2', output=\"out\").build()\r\n assert self.get(\"media/out\") == \"foo\\nbar\"", "async def test_unpacker_do_work_bundle(config, mocker, path_map_mock):\n logger_mock = mocker.MagicMock()\n lta_rc_mock = mocker.patch(\"rest_tools.client.RestClient.request\", new_callable=AsyncMock)\n mock_zipfile_init = mocker.patch(\"zipfile.ZipFile.__init__\")\n mock_zipfile_init.return_value = None\n mock_zipfile_write = mocker.patch(\"zipfile.ZipFile.extractall\")\n mock_zipfile_write.return_value = None\n mock_json_load = mocker.patch(\"json.load\")\n mock_json_load.return_value = {\n \"files\": [\n {\n \"logical_name\": \"/full/path/to/file/in/data/warehouse.tar.bz2\",\n \"file_size\": 1234567890,\n \"checksum\": {\n \"adler32\": \"89d5efeb\",\n \"sha512\": \"c919210281b72327c179e26be799b06cdaf48bf6efce56fb9d53f758c1b997099831ad05453fdb1ba65be7b35d0b4c5cebfc439efbdf83317ba0e38bf6f42570\",\n },\n }\n ]\n }\n mock_shutil_move = mocker.patch(\"shutil.move\")\n mock_shutil_move.return_value = None\n mock_lta_checksums = mocker.patch(\"lta.unpacker.lta_checksums\")\n mock_lta_checksums.return_value = {\n \"adler32\": \"89d5efeb\",\n \"sha512\": \"c919210281b72327c179e26be799b06cdaf48bf6efce56fb9d53f758c1b997099831ad05453fdb1ba65be7b35d0b4c5cebfc439efbdf83317ba0e38bf6f42570\",\n }\n mock_os_path_getsize = mocker.patch(\"os.path.getsize\")\n mock_os_path_getsize.return_value = 1234567890\n mock_os_remove = mocker.patch(\"os.remove\")\n mock_os_remove.return_value = None\n mock_os_scandir = mocker.patch(\"os.scandir\")\n mock_os_scandir.return_value.__enter__.return_value = []\n altfc_mock = mocker.patch(\"lta.unpacker.Unpacker._add_location_to_file_catalog\", new_callable=AsyncMock)\n altfc_mock.return_value = False\n p = Unpacker(config, logger_mock)\n BUNDLE_OBJ = {\n \"bundle_path\": \"/mnt/lfss/jade-lta/bundler_out/9a1cab0a395211eab1cbce3a3da73f88.zip\",\n \"uuid\": \"f74db80e-9661-40cc-9f01-8d087af23f56\",\n \"source\": \"NERSC\",\n \"dest\": \"WIPAC\",\n \"path\": \"/full/path/to/file\",\n \"files\": [{\"logical_name\": \"/full/path/to/file/in/data/warehouse.tar.bz2\", }],\n }\n with patch(\"builtins.open\", mock_open(read_data=\"data\")) as metadata_mock:\n await p._do_work_bundle(lta_rc_mock, BUNDLE_OBJ)\n metadata_mock.assert_called_with(mocker.ANY)", "def test_tarballs_pre_extracted_tech_specific(self) -> None:\n import hammer_config\n\n tech_dir, tech_dir_base = HammerToolTestHelpers.create_tech_dir(\"dummy28\")\n tech_json_filename = os.path.join(tech_dir, \"dummy28.tech.json\")\n\n # Add defaults to specify tarball_dir.\n with open(os.path.join(tech_dir, \"defaults.json\"), \"w\") as f:\n f.write(json.dumps({\n \"technology.dummy28.tarball_dir\": tech_dir,\n \"vlsi.technology.extracted_tarballs_dir\": \"/should/not/be/used\",\n \"technology.dummy28.extracted_tarballs_dir\": tech_dir_base\n }, cls=HammerJSONEncoder))\n\n 
HammerToolTestHelpers.write_tech_json(tech_json_filename, self.add_tarballs)\n sys.path.append(tech_dir_base)\n tech = self.get_tech(hammer_tech.HammerTechnology.load_from_dir(\"dummy28\", tech_dir))\n tech.cache_dir = tech_dir\n\n database = hammer_config.HammerDatabase()\n database.update_technology(tech.get_config())\n HammerVLSISettings.load_builtins_and_core(database)\n tech.set_database(database)\n outputs = tech.process_library_filter(pre_filts=[], filt=hammer_tech.filters.gds_filter,\n must_exist=False,\n output_func=lambda str, _: [str])\n\n self.assertEqual(outputs, [\"{0}/foobar.tar.gz/test.gds\".format(tech_dir_base)])\n\n # Cleanup\n shutil.rmtree(tech_dir_base)", "def test_06_verify_tar01(self, mock_isfile, mock_call, mock_msg):\n mock_msg.level = 0\n mock_isfile.return_value = False\n mock_call.return_value = 0\n status = udocker.FileUtil(\"tarball.tar\").verify_tar()\n self.assertFalse(status)", "def test_IsPackage_files():\n with tempfile.NamedTemporaryFile() as f:\n assert not dpack._IsPackage(pathlib.Path(f.name))\n with tempfile.NamedTemporaryFile(suffix=\".txt\") as f:\n assert not dpack._IsPackage(pathlib.Path(f.name))\n with tempfile.NamedTemporaryFile(suffix=\".tar.bz2\") as f:\n assert not dpack._IsPackage(pathlib.Path(f.name))\n with tempfile.NamedTemporaryFile(suffix=\".dpack.tar.bz2\") as f:\n assert dpack._IsPackage(pathlib.Path(f.name))", "def test_07_verify_tar02(self, mock_isfile, mock_call, mock_msg):\n mock_msg.level = 0\n mock_isfile.return_value = True\n mock_call.return_value = 0\n status = udocker.FileUtil(\"tarball.tar\").verify_tar()\n self.assertTrue(status)", "def diff_bundle_contents():\n dir_package = os.listdir(ARCHIVE_TARGET)\n dir_setup = os.listdir(MODEL_TARGET)\n if dir_package != dir_setup:\n return True\n for bundle in dir_package:\n os.chdir(ARCHIVE_TARGET)\n subprocess.run([\"git\", \"clone\", bundle])\n os.chdir(\"..\")\n os.chdir(MODEL_TARGET)\n subprocess.run([\"git\", \"clone\", bundle])\n os.chdir(\"..\")\n dcmp = filecmp.dircmp(\n join(ARCHIVE_TARGET, bundle[: bundle.find(\".bundle\")]),\n join(MODEL_TARGET, bundle[: bundle.find(\".bundle\")]),\n )\n diff = Diff(dcmp)\n if diff.run():\n return True\n return False", "def test_conversion(tmp_path, wheel_path):\n\n os.chdir(str(tmp_path))\n\n # convert wheel to debian source package\n with patch.object(sys, 'argv', ['', '-x', str(wheel_path.parent)]):\n with patch.object(wheel2deb.sys, \"exit\") as mock_exit:\n wheel2deb.main()\n assert mock_exit.call_args[0][0] == 0\n\n unpack_path = tmp_path / 'output/python3-foobar_0.1.0-1~w2d0_all'\n assert unpack_path.exists()\n\n # build source package\n with patch.object(sys, 'argv', ['', 'build']):\n with patch.object(wheel2deb.sys, \"exit\") as mock_exit:\n wheel2deb.main()\n assert mock_exit.call_args[0][0] == 0\n\n # output dir should contain a .deb\n package_list = list((tmp_path / 'output').glob('*.deb'))\n assert package_list\n\n package_path = package_list[0]\n assert package_path.name.startswith('python3-foobar_0.1.0-1')\n\n package_hash = digests(package_list[0])\n\n # check that the entrypoint will be installed in /usr/bin\n entrypoint = (unpack_path / 'debian/python3-foobar/usr/bin/entrypoint')\n assert entrypoint.exists()\n\n # check shebang\n with open(str(entrypoint), 'r') as f:\n shebang = f.readline()\n assert shebang.startswith('#!/usr/bin')\n\n # idempotence: delete package, rerun build command\n # and check that both packages have the same hash\n package_list[0].unlink()\n with patch.object(sys, 'argv', ['', 'build']):\n 
with patch.object(wheel2deb.sys, \"exit\") as mock_exit:\n wheel2deb.main()\n assert mock_exit.call_args[0][0] == 0\n assert digests(package_path) == package_hash", "def test_08_verify_tar03(self, mock_isfile, mock_call, mock_msg):\n mock_msg.level = 0\n mock_isfile.return_value = True\n mock_call.return_value = 1\n status = udocker.FileUtil(\"tarball.tar\").verify_tar()\n self.assertFalse(status)", "def main():\n parser = argparse.ArgumentParser(description='Create packaged set of modulefiles for deployment on OASIS.')\n parser.add_argument('--location', dest='location', default=None,\n help='Location directory to place files in')\n parser.add_argument('--tarfile', dest='tarfile', default=None,\n help='Name of tarfile to generate')\n args = parser.parse_args(sys.argv[1:])\n if args.location is None:\n args.location = tempfile.mkdtemp()\n elif os.path.exists(args.location):\n overwrite = raw_input(\"{0} exists, overwrite? \".format(args.location))\n if overwrite.lower().strip() != 'y':\n sys.stderr.write(\"Exiting...\")\n sys.exit(0)\n shutil.rmtree(args.location)\n os.mkdir(args.location)\n else:\n os.mkdir(args.location)\n location = checkout_repo(args.location) \n if location is None:\n sys.stderr.write(\"Can't checkout modulefiles to {0}!\\n\".format(args.location))\n package_files(location)\n if args.tarfile is None:\n args.tarfile = \"/tmp/moduleupdate.tar.gz\"\n if tar_files(location, args.tarfile) is None:\n sys.stderr.write(\"Error generating tarfile, exiting\\n\")\n sys.exit(1)\n shutil.rmtree(location)\n sys.stdout.write(\"Packaged files located at {0}\\n\".format(args.tarfile))", "def main():\n\n print \"Starting tar-maker script..\"\n # String of files we're going to be looking for\n files=\"runlocaltests.py testprocess.py verifyfiles.mix cleanup_deploy.py hashes.dict upgrade_nodes.sh deploy_helper.py\"\n\n # TODO: add list of 'optional files' to include\n\n # get the files passed in as arguments\n files_from_args = ''\n # 1 skips this file name\n print\n \n for eachfile in range(1, len(sys.argv)):\n print \"Adding custom file: \"+sys.argv[eachfile]\n files_from_args+=' '+sys.argv[eachfile]\n print\n # mash the two strings together now\n files+=files_from_args\n\n # Total number of files split by spaces\n total_files=len(files.split(' '))\n\n # Counter for found files\n num_files_found=0\n\n # Temporary tar, incrementally we'll build it up\n # Will remove the temp files (since I use -update flag)\n # for building up the .tar\n if os.path.isfile('./deploy.tar.temp'):\n os.remove('./deploy.tar.temp')\n\n\n for filename in files.split(' '):\n print ' Looking for '+filename+' in '+os.getcwd()\n if os.path.isfile('./'+filename):\n print ' File found!'\n num_files_found += 1\n shellexec('tar -rf deploy.tar.temp '+filename)\n else:\n print ' WARNING: '+filename+' NOT FOUND'\n\n print\n print \"Found \"+str(num_files_found)+\" of \"+str(total_files)+\" necessary files.\"\n print\n\n # Did we find all of the files?\n if num_files_found == total_files:\n print\n print 'All files found, finishing tar..'\n # rename the file to the final name.\n # this will over-write current deploy.tar in the dir if one exists \n shellexec('mv deploy.tar.temp deploy.tar')\n return 0\n else:\n print 'FATAL ERROR: Not all the files where found, please check that '\n print ' this script is in the same directory as the files. 
'\n print\n print \"Cleaning up temp files...\"\n \n # remove deploy.tar.temp only if it exists.\n if os.path.isfile('./deploy.tar.temp'):\n os.remove('./deploy.tar.temp')\n \n print\n print 'Finished (with errors)'\n return 1", "def do_pack():\n\n local(\"mkdir -p versions\")\n current = dt.now()\n current = current.now()\n tgz = \"web_static_{}.tgz\".format(current.strftime(\"%Y%m%d%H%M%S\"))\n working = local(\"tar -cavf versions/{} web_static\".format(tgz))\n\n if working.failed:\n return None\n else:\n return \"versions/{}\".format(tgz)", "def do_pack():\n time_test = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n file_name = \"versions/web_static_\" + time_test + \".tgz\"\n command1 = \"mkdir -p versions\"\n command2 = \"tar -czvf \" + file_name + \" web_static\"\n local(command1)\n com = local(command2)\n if com.return_code == 0:\n return file_name\n else:\n return None", "def pack():\n clean_local()\n build()\n copy_json()\n optimize()\n tarball()", "def assert_do_gen_package(config: dict) -> None:\n with tempfile.TemporaryDirectory() as tmpdir:\n package_filename = os.path.join(tmpdir, 'package.tar.xz')\n package_extract_dir = os.path.join(tmpdir, 'package')\n\n # Build and extract package.\n gen.do_gen_package(config, package_filename)\n os.makedirs(package_extract_dir)\n with tarfile.open(package_filename) as package_tarball:\n package_tarball.extractall(package_extract_dir)\n\n assert_package_contents(config, package_extract_dir)", "def _build_collection_tar(\n b_collection_path, # type: bytes\n b_tar_path, # type: bytes\n collection_manifest, # type: CollectionManifestType\n file_manifest, # type: FilesManifestType\n): # type: (...) -> str\n files_manifest_json = to_bytes(json.dumps(file_manifest, indent=True), errors='surrogate_or_strict')\n collection_manifest['file_manifest_file']['chksum_sha256'] = secure_hash_s(files_manifest_json, hash_func=sha256)\n collection_manifest_json = to_bytes(json.dumps(collection_manifest, indent=True), errors='surrogate_or_strict')\n\n with _tempdir() as b_temp_path:\n b_tar_filepath = os.path.join(b_temp_path, os.path.basename(b_tar_path))\n\n with tarfile.open(b_tar_filepath, mode='w:gz') as tar_file:\n # Add the MANIFEST.json and FILES.json file to the archive\n for name, b in [(MANIFEST_FILENAME, collection_manifest_json), ('FILES.json', files_manifest_json)]:\n b_io = BytesIO(b)\n tar_info = tarfile.TarInfo(name)\n tar_info.size = len(b)\n tar_info.mtime = int(time.time())\n tar_info.mode = 0o0644\n tar_file.addfile(tarinfo=tar_info, fileobj=b_io)\n\n for file_info in file_manifest['files']: # type: ignore[union-attr]\n if file_info['name'] == '.':\n continue\n\n # arcname expects a native string, cannot be bytes\n filename = to_native(file_info['name'], errors='surrogate_or_strict')\n b_src_path = os.path.join(b_collection_path, to_bytes(filename, errors='surrogate_or_strict'))\n\n def reset_stat(tarinfo):\n if tarinfo.type != tarfile.SYMTYPE:\n existing_is_exec = tarinfo.mode & stat.S_IXUSR\n tarinfo.mode = 0o0755 if existing_is_exec or tarinfo.isdir() else 0o0644\n tarinfo.uid = tarinfo.gid = 0\n tarinfo.uname = tarinfo.gname = ''\n\n return tarinfo\n\n if os.path.islink(b_src_path):\n b_link_target = os.path.realpath(b_src_path)\n if _is_child_path(b_link_target, b_collection_path):\n b_rel_path = os.path.relpath(b_link_target, start=os.path.dirname(b_src_path))\n\n tar_info = tarfile.TarInfo(filename)\n tar_info.type = tarfile.SYMTYPE\n tar_info.linkname = to_native(b_rel_path, errors='surrogate_or_strict')\n tar_info = 
reset_stat(tar_info)\n tar_file.addfile(tarinfo=tar_info)\n\n continue\n\n # Dealing with a normal file, just add it by name.\n tar_file.add(\n to_native(os.path.realpath(b_src_path)),\n arcname=filename,\n recursive=False,\n filter=reset_stat,\n )\n\n shutil.copy(to_native(b_tar_filepath), to_native(b_tar_path))\n collection_name = \"%s.%s\" % (collection_manifest['collection_info']['namespace'],\n collection_manifest['collection_info']['name'])\n tar_path = to_text(b_tar_path)\n display.display(u'Created collection for %s at %s' % (collection_name, tar_path))\n return tar_path", "def test_update_software_asset_bundle(self):\n pass", "async def test_unpacker_do_work_bundle_mismatch_checksum(config, mocker, path_map_mock):\n logger_mock = mocker.MagicMock()\n lta_rc_mock = mocker.patch(\"rest_tools.client.RestClient.request\", new_callable=AsyncMock)\n mock_zipfile_init = mocker.patch(\"zipfile.ZipFile.__init__\")\n mock_zipfile_init.return_value = None\n mock_zipfile_write = mocker.patch(\"zipfile.ZipFile.extractall\")\n mock_zipfile_write.return_value = None\n mock_json_load = mocker.patch(\"json.load\")\n mock_json_load.return_value = {\n \"files\": [\n {\n \"logical_name\": \"/full/path/to/file/in/data/warehouse.tar.bz2\",\n \"file_size\": 1234567890,\n \"checksum\": {\n \"adler32\": \"89d5efeb\",\n \"sha512\": \"c919210281b72327c179e26be799b06cdaf48bf6efce56fb9d53f758c1b997099831ad05453fdb1ba65be7b35d0b4c5cebfc439efbdf83317ba0e38bf6f42570\",\n },\n }\n ]\n }\n mock_shutil_move = mocker.patch(\"shutil.move\")\n mock_shutil_move.return_value = None\n mock_lta_checksums = mocker.patch(\"lta.unpacker.lta_checksums\")\n mock_lta_checksums.return_value = {\n \"adler32\": \"89d5efeb\",\n \"sha512\": \"919210281b72327c179e26be799b06cdaf48bf6efce56fb9d53f758c1b997099831ad05453fdb1ba65be7b35d0b4c5cebfc439efbdf83317ba0e38bf6f42570c\",\n }\n mock_os_path_getsize = mocker.patch(\"os.path.getsize\")\n mock_os_path_getsize.return_value = 1234567890\n mock_os_remove = mocker.patch(\"os.remove\")\n mock_os_remove.return_value = None\n altfc_mock = mocker.patch(\"lta.unpacker.Unpacker._add_location_to_file_catalog\", new_callable=AsyncMock)\n altfc_mock.return_value = False\n p = Unpacker(config, logger_mock)\n BUNDLE_OBJ = {\n \"bundle_path\": \"/mnt/lfss/jade-lta/bundler_out/9a1cab0a395211eab1cbce3a3da73f88.zip\",\n \"uuid\": \"f74db80e-9661-40cc-9f01-8d087af23f56\",\n \"source\": \"NERSC\",\n \"dest\": \"WIPAC\",\n \"path\": \"/full/path/to/file\",\n \"files\": [{\"logical_name\": \"/full/path/to/file/in/data/warehouse.tar.bz2\", }],\n }\n with patch(\"builtins.open\", mock_open(read_data=\"data\")) as metadata_mock:\n with pytest.raises(Exception):\n await p._do_work_bundle(lta_rc_mock, BUNDLE_OBJ)\n metadata_mock.assert_called_with(mocker.ANY)", "def test_create_software_asset_bundle_from_system_module(self):\n pass", "def test_package_compile(self):\n\n test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')\n test_package_dir = os.path.join(test_data_dir, 'testpackage')\n tmpdir = tempfile.mkdtemp()\n cwd = os.getcwd()\n try:\n os.chdir(test_package_dir)\n subprocess.check_call(['python3', 'setup.py', 'sdist', '--format=gztar', '-d', tmpdir])\n package_path = os.path.join(tmpdir, 'testsample-0.1.tar.gz')\n target_tar = os.path.join(tmpdir, 'compose.tar.gz')\n subprocess.check_call([\n 'dsl-compile', '--package', package_path, '--namespace', 'mypipeline',\n '--output', target_tar, '--function', 'download_save_most_frequent_word'])\n with open(os.path.join(test_data_dir, 
'compose.yaml'), 'r') as f:\n golden = yaml.load(f)\n compiled = self._get_yaml_from_tar(target_tar)\n\n self.maxDiff = None\n self.assertEqual(golden, compiled)\n finally:\n shutil.rmtree(tmpdir)\n os.chdir(cwd)", "def do_pack():\n d = datetime.now()\n local(\"mkdir -p versions\")\n file_name = 'versions/web_static_{}{}{}{}{}{}.tgz\\\n'.format(d.year, d.month, d.day, d.hour, d.minute, d.second)\n status = local(\"tar -cvzf\" + file_name + \" ./web_static/\", capture=True)\n if status.succeeded:\n return file_name\n return None", "def do_pack():\n now = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n local('mkdir -p versions')\n result = local('tar -czvf versions/web_static_{}.tgz web_static'\n .format(now))\n if result.failed:\n return None\n else:\n return result", "def do_pack():\n\n datenow = datetime.now()\n full_date = datenow.strftime(\"%Y%m%d%H%M%S\")\n\n try:\n if not os.path.isdir(\"versions\"):\n local(\"mkdir versions\")\n local_command = local(\"tar -cvzf versions/web_static_{}.tgz web_static\"\n .format(full_date))\n return local_command\n except Exception:\n return None", "def test_get_software_bundles(self):\n pass", "def package(target, source, env):\n\n # Print out.\n print('')\n print(\"#######################\")\n print(\"# Packaging the files #\")\n print(\"#######################\")\n\n # List of distribution files.\n type_list = [env['DIST_TYPE']]\n if type_list[0] == 'ALL':\n type_list = ['zip', 'tar']\n\n # Loop over the distribution files.\n for dist_type in type_list:\n # The file name.\n if dist_type == 'zip':\n file = env['DIST_FILE'] + '.zip'\n elif dist_type == 'tar':\n file = env['DIST_FILE'] + '.tar.bz2'\n elif dist_type == 'dmg':\n file = env['DIST_FILE'] + '.dmg'\n\n # Print out.\n print(\"\\n\\nCreating the package distribution \" + repr(file) + \".\\n\")\n\n # Create the special Mac OS X DMG file and then stop execution.\n if dist_type == 'dmg':\n # Create the Mac OS X universal application.\n print(\"\\n# Creating the Mac OS X universal application.\\n\\n\")\n cmd = '%s setup.py py2app' % sys.executable\n print(\"%s\\n\" % cmd)\n pipe = Popen(cmd, shell=True, stdin=PIPE, close_fds=False)\n waitpid(pipe.pid, 0)\n\n # Create the dmg image.\n print(\"\\n\\n# Creating the DMG image.\\n\\n\")\n cmd = 'hdiutil create -ov -fs HFS+ -volname \"relax\" -srcfolder dist/relax.app ../%s' % file\n print(\"%s\\n\" % cmd)\n pipe = Popen(cmd, shell=True, stdin=PIPE, close_fds=False)\n waitpid(pipe.pid, 0)\n\n # Stop executing.\n return\n\n # Open the Zip distribution file.\n if dist_type == 'zip':\n archive = ZipFile(path.pardir + path.sep + file, 'w', compression=8)\n\n # Open the Tar distribution file.\n elif dist_type == 'tar':\n if search('.bz2$', file):\n archive = TarFile.bz2open(path.pardir + path.sep + file, 'w')\n elif search('.gz$', file):\n archive = TarFile.gzopen(path.pardir + path.sep + file, 'w')\n else:\n archive = TarFile.open(path.pardir + path.sep + file, 'w')\n\n # Base directory.\n base = getcwd() + sep\n\n # Walk through the directories.\n for root, dirs, files in walk(getcwd()):\n # Skip the subversion directories.\n if search(\"\\.svn\", root):\n continue\n\n # Add the files in the current directory to the archive.\n for i in range(len(files)):\n # Skip any '.sconsign' files, hidden files, byte-compiled '*.pyc' files, or binary objects '.o', '.os', 'obj', 'lib', and 'exp'.\n if search(\"\\.sconsign\", files[i]) or search(\"^\\.\", files[i]) or search(\"\\.pyc$\", files[i]) or search(\"\\.o$\", files[i]) or search(\"\\.os$\", files[i]) or 
search(\"\\.obj$\", files[i]) or search(\"\\.lib$\", files[i]) or search(\"\\.exp$\", files[i]):\n continue\n\n # Create the file name (without the base directory).\n name = path.join(root, files[i])\n name = name[len(base):]\n print('relax-' + version + path.sep + name)\n\n # The archive file name.\n arcname = 'relax-' + version + path.sep + name\n\n # Zip archives.\n if dist_type == 'zip':\n archive.write(filename=name, arcname=arcname)\n\n # Tar archives.\n if dist_type == 'tar':\n archive.add(name=name, arcname=arcname)\n\n # Close the archive.\n archive.close()\n\n # Final printout.\n print(\"\\n\\n\\n\")", "def do_pack():\n local(\"sudo mkdir -p versions\")\n date_time = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n name_file = \"versions/web_static{}.tgz\".format(date_time)\n local(\"sudo tar -cvzf {} web_static\".format(name_file))\n return name_file" ]
[ "0.70526093", "0.6616482", "0.6440095", "0.6328305", "0.6312369", "0.62404823", "0.6202618", "0.6194006", "0.6173202", "0.61719835", "0.6164269", "0.6147481", "0.6141193", "0.6137979", "0.61363333", "0.6133573", "0.60993564", "0.60972047", "0.6089963", "0.6070059", "0.6045629", "0.60303134", "0.60236454", "0.60002446", "0.59878105", "0.59720063", "0.59594685", "0.5959003", "0.59345907", "0.5927213" ]
0.81422436
0
Test case for copying ("put") and moving ("replace") local files
def testMoveAndCopyFile(self):
    try:
        remoteLocator = self.__pathPdbxDictionaryFile
        fn = self.__fileU.getFileName(remoteLocator)
        # _, fn = os.path.split(remoteLocator)
        lPath = os.path.join(self.__workPath, fn)
        ok = self.__fileU.get(remoteLocator, lPath)
        self.assertTrue(ok)
        # Test copy file
        dPath2 = os.path.join(self.__workPath, "tdir")
        ok = self.__fileU.mkdir(dPath2)
        self.assertTrue(ok)
        lPath2 = os.path.join(dPath2, fn)
        ok = self.__fileU.put(lPath, lPath2)
        self.assertTrue(ok)
        ok = self.__fileU.exists(lPath)
        self.assertTrue(ok)
        ok = self.__fileU.exists(lPath2)
        self.assertTrue(ok)
        # Remove copied file (to test moving file next)
        ok = self.__fileU.remove(lPath2)
        self.assertTrue(ok)
        ok = self.__fileU.exists(lPath2)
        self.assertFalse(ok)
        # Test move file
        ok = self.__fileU.replace(lPath, lPath2)
        self.assertTrue(ok)
        ok = self.__fileU.exists(lPath)
        self.assertFalse(ok)
        ok = self.__fileU.exists(lPath2)
        self.assertTrue(ok)
        # Now clean up files and dirs
        ok = self.__fileU.remove(lPath)
        self.assertTrue(ok)
        ok = self.__fileU.remove(dPath2)
        self.assertTrue(ok)
    except Exception as e:
        logger.exception("Failing with %s", str(e))
        self.fail()
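The put/copy and replace/move semantics this test checks correspond to shutil.copyfile and os.replace in the standard library; a minimal sketch under that assumption:

import os
import shutil
import tempfile

with tempfile.TemporaryDirectory() as work:
    src = os.path.join(work, "original.txt")
    with open(src, "w") as fh:
        fh.write("content")

    copied = os.path.join(work, "copied.txt")
    shutil.copyfile(src, copied)   # "put": the source file remains afterwards
    assert os.path.exists(src) and os.path.exists(copied)

    moved = os.path.join(work, "moved.txt")
    os.replace(copied, moved)      # "replace": atomic move; the source is gone
    assert not os.path.exists(copied) and os.path.exists(moved)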
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copy(self, src_path: str, tgt_path: str) -> None:", "def _test_upload_dir_contents(self, filenames):\n local_src_dir = self._local_tempdir\n remote_dest_dir = 'remote_dest_dir'\n for filename in filenames:\n self._expected_commands.append('%s cp -a public %s %s' % (\n GSUTIL_LOCATION,\n os.path.join(local_src_dir, filename),\n posixpath.join(remote_dest_dir, filename)))\n with open(os.path.join(local_src_dir, filename), 'w'):\n pass\n gs_utils.upload_dir_contents(\n local_src_dir=local_src_dir, remote_dest_dir=remote_dest_dir,\n gs_acl='public')", "def test_move_overwrite(remote,AB,all_):\n testpath = os.path.join(os.path.abspath(os.path.split(__file__)[0]),\n 'test_dirs','pp','test_move_overwrite')\n try:\n shutil.rmtree(testpath)\n except:\n pass\n os.makedirs(testpath)\n testutil = testutils.Testutils(testpath=testpath)\n\n # Init\n testutil.write('A/fileA0',text='fileA0')\n testutil.write('A/fileB0',text='fileB0')\n\n # copy over\n testutil.copy_tree()\n\n # Start it\n config = testutil.get_config(remote=remote)\n testutil.init(config)\n\n # Apply actions\n testutil.write('A/fileA1',text='fileA1')\n testutil.move('A/fileA0','A/fileB1')\n\n testutil.write('B/fileB1',text='fileB1')\n testutil.move('B/fileB0','B/fileA1')\n\n # Sync\n if AB == 'A':\n mode = 'push'\n else:\n mode='pull'\n\n if all_:\n mode += '_all'\n\n testutil.run(config,mode=mode)\n\n # Check it -- Only need to check A\n diff = testutil.compare_tree()\n\n if all_:\n assert len(diff) == 0\n # In the end, all files are either moved or overwritten. We do not\n # expect there to be any differences\n elif AB == 'A': # Check backups in B\n assert diff == [('missing_inB', 'fileB0')] # Never gets pushed\n \n elif AB == 'B': # Check backups in B\n assert diff == [('missing_inA', 'fileA0')] # Never gets pulled", "def test_retrieve_files_move_existing_file(self):\n os.makedirs('/tmp/remote_pacha/localhost/etc')\n os.mkdir('/tmp/remote_pacha/localhost/home')\n remote_file = open('/tmp/remote_pacha/localhost/etc/etc.conf', 'w')\n remote_file.write(\"remote second file\")\n remote_file.close()\n remote_file = open('/tmp/remote_pacha/localhost/home/home.conf', 'w')\n remote_file.write(\"remote file\")\n remote_file.close()\n server = \"%s@%s\" % (self.username, host.hostname()) \n os.mkdir('/tmp/localhost')\n\n run = rebuild.Rebuild(server=server,\n hostname='localhost', \n source='/tmp/remote_pacha')\n run.retrieve_files()\n result_1 = os.path.isfile('/tmp/localhost/etc/etc.conf')\n result_2 = os.path.isfile('/tmp/localhost/home/home.conf')\n result_3 = os.path.isdir('/tmp/localhost.%s' % strftime('%H%M%s'))\n line = open('/tmp/localhost/etc/etc.conf')\n remote_line = line.readline()\n self.assertEqual(remote_line, \"remote second file\")\n self.assertTrue(result_3)\n self.assertTrue(result_2)\n self.assertTrue(result_1)", "def _safe_put(localfile, remotefile):\n _suffix = '.%s.bak' % datetime.datetime.now().strftime('%Y-%m-%d_%H%M')\n if exists(remotefile):\n run('mv %s %s' % (remotefile, remotefile+_suffix))\n #~ print('put %s. 
Backup: %s' % (remotefile, remotefile+_suffix))\n put(localfile, remotefile)", "def put(self, src, dst):\r\n abs_src = os.path.expanduser(src)\r\n assert os.path.exists(abs_src), 'File does not exist, cannot copy: %s' % abs_src\r\n return self._do_put(abs_src, dst)", "def test_upload_dir_contents_one_dir(self):\n local_src_dir = self._local_tempdir\n remote_dest_dir = 'remote_dest_dir'\n subdir = 'subdir'\n os.mkdir(os.path.join(local_src_dir, subdir))\n for filename in ['file1', 'file2']:\n self._expected_commands.append('%s cp -a public %s %s' % (\n GSUTIL_LOCATION,\n os.path.join(local_src_dir, subdir, filename),\n posixpath.join(remote_dest_dir, subdir, filename)))\n with open(os.path.join(local_src_dir, subdir, filename), 'w'):\n pass\n gs_utils.upload_dir_contents(\n local_src_dir=local_src_dir, remote_dest_dir=remote_dest_dir,\n gs_acl='public')", "def _copy_file ( self, source, dest ):\n return", "def _process_file_movement(src:str, dest:str, is_move=False)->bool:\n debug_str = \"move\" if (is_move) else \"copy\"\n \n objects = _list_objects(src) # list objects\n for obj in objects:\n if _is_dir(dest) or _is_dir(src):\n temp_dest = _append_object(dest, _get_dest_obj_name(src, obj))\n else:\n temp_dest = dest\n \n if _is_s3(src) and _is_s3(dest): #s3 to s3\n src_bucket, _ = _extract_bucket_key(src)\n dest_bucket, dest_key = _extract_bucket_key(temp_dest)\n print(f\"{debug_str} file s3://{src_bucket}/{obj} to {temp_dest}\")\n status = _copy_s3_to_s3(src_bucket, obj, dest_bucket, dest_key)\n if status and is_move:\n aws_s3_rm(f\"s3://{src_bucket}/{obj}\")\n elif _is_s3(src): # s3 to local\n src_bucket, _ = _extract_bucket_key(src)\n _create_local_dir(temp_dest) # create dir if doesn't exist\n print(f\"{debug_str} file s3://{src_bucket}/{obj} to {temp_dest}\")\n status = _copy_s3_to_local(src_bucket, obj, temp_dest)\n if status and is_move:\n aws_s3_rm(f\"s3://{src_bucket}/{obj}\")\n elif _is_s3(dest): # local to s3\n dest_bucket, dest_key = _extract_bucket_key(temp_dest)\n print(f\"{debug_str} file {obj} to {temp_dest}\")\n status = _copy_local_to_s3(obj, dest_bucket, dest_key)\n if status and is_move:\n os.remove(obj) \n \n if not status:\n raise Error(f\"S3 {debug_str} failed.\")\n return True", "def test_15_copyto(self):\n with mock.patch(BUILTINS + '.open', mock.mock_open()):\n status = udocker.FileUtil(\"source\").copyto(\"dest\")\n self.assertTrue(status)\n status = udocker.FileUtil(\"source\").copyto(\"dest\", \"w\")\n self.assertTrue(status)\n status = udocker.FileUtil(\"source\").copyto(\"dest\", \"a\")\n self.assertTrue(status)", "def test_change_file_to_dir_with_file(self):\n #TODO: File must be removed before directory is created\n dir0, dir1 = self.make_temp_dirs(2)\n self.write_file(dir0, \"foo\", \"bar\")\n self.sync_all()\n self.assertFile(dir0, \"foo\", \"bar\")\n self.assertFile(dir1, \"foo\", \"bar\")\n\n self.delete_file(dir0, \"foo\")\n self.write_file(dir0, \"foo/bar\", \"baz\")\n self.sync_all()\n self.assertFile(dir0, \"foo/bar\", \"baz\")\n self.assertFile(dir1, \"foo/bar\", \"baz\")", "def copy(self, source_host, dest_host, filename):", "def move(self, name, source, dest):\n self.m.path.assert_absolute(source)\n self.m.path.assert_absolute(dest)\n self._run(name, ['move', source, dest])\n self.m.path.mock_copy_paths(source, dest)\n self.m.path.mock_remove_paths(source)", "def test_move_to_trash(self):\n os.chdir(\"testimages/\")\n shutil.copyfile(\"arch_001.jpg\", \"image_to_edit.jpg\")\n filename = os.path.abspath(\"image_to_edit.jpg\")\n files = 
[filename]\n fileactions.move_to_trash(files, self.trashdir)\n trashed_file = os.path.join(self.trashdir, \"image_to_edit.jpg\")\n self.assertTrue(os.path.isfile(trashed_file))\n # Repeat, to check if backing up works\n shutil.copyfile(\"arch_001.jpg\", \"image_to_edit.jpg\")\n fileactions.move_to_trash(files, self.trashdir)\n trashed_file1 = os.path.join(self.trashdir, \"image_to_edit.jpg.1\")\n self.assertTrue(os.path.isfile(trashed_file1))\n shutil.copyfile(\"arch_001.jpg\", \"image_to_edit.jpg\")\n fileactions.move_to_trash(files, self.trashdir)\n trashed_file2 = os.path.join(self.trashdir, \"image_to_edit.jpg.2\")\n self.assertTrue(os.path.isfile(trashed_file2))\n # Clear the files\n os.remove(trashed_file)\n os.remove(trashed_file1)", "def testPut(self):\n # XXX - not actually a unit test\n expectedOutput = (b'Transferred ' + self.testDir.asBytesMode().path +\n b'/testfile1 to ' + self.testDir.asBytesMode().path +\n b'/test\"file2')\n def _checkPut(result):\n self.assertFilesEqual(self.testDir.child('testfile1'),\n self.testDir.child('test\"file2'))\n self.assertTrue(result.endswith(expectedOutput))\n return self.runCommand('rm \"test\\\\\"file2\"')\n\n d = self.runCommand('put %s/testfile1 \"test\\\\\"file2\"'\n % (self.testDir.path,))\n d.addCallback(_checkPut)\n d.addCallback(lambda _: self.assertFalse(\n self.testDir.child('test\"file2').exists()))\n return d", "def move_from_temp_directory(self):", "def test_profile_copy_file(profile_manager, test_profile,\n tmpdir, inventory_content):\n\n myfile = tmpdir.mkdir(\"ir_dir\").join(\"fake_hosts_file\")\n myfile.write(inventory_content)\n org_inventory = myfile.strpath\n\n target_path = test_profile.copy_file(org_inventory)\n assert target_path == os.path.join(\n test_profile.path, os.path.basename(org_inventory))\n\n profile_inventory = py.path.local(target_path)\n assert profile_inventory.check(file=1)\n assert inventory_content == profile_inventory.read()", "def test_move_file_new_workspace(self, mock_message, mock_delete, mock_upload, mock_download, mock_paths):\n\n volume_path = os.path.join('the', 'volume', 'path')\n file_path_1 = os.path.join('my_dir', 'my_file.txt')\n file_path_2 = os.path.join('my_dir', 'my_file.json')\n full_path_file_1 = os.path.join(volume_path, file_path_1)\n full_path_file_2 = os.path.join(volume_path, file_path_2)\n\n file_1 = storage_test_utils.create_file(file_path=file_path_1, workspace=self.old_workspace)\n file_2 = storage_test_utils.create_file(file_path=file_path_2, workspace=self.old_workspace)\n file_ids = [file_1.id, file_2.id]\n\n # Call function\n move_files(file_ids, new_workspace=self.new_workspace, new_file_path=None)\n\n # Check results\n mock_download.assert_called()\n mock_upload.assert_called()\n mock_delete.assert_called()", "def copy_to_local(src_file):\r\n if not_exists(src_file, \"Source File\"):\r\n return 1, 0\r\n _local_file = os.path.basename(src_file)\r\n if wrap_cp_file(src_file, _local_file):\r\n return 1, 0\r\n return 0, _local_file", "def copy_to_local(src_file):\r\n if not_exists(src_file, \"Source File\"):\r\n return 1, 0\r\n _local_file = os.path.basename(src_file)\r\n if wrap_cp_file(src_file, _local_file):\r\n return 1, 0\r\n return 0, _local_file", "def move(self,src,dst):\n src = os.path.join(self.testpath,src)\n dst = os.path.join(self.testpath,dst)\n directory = os.path.split(dst)[0]\n try:\n os.makedirs(directory)\n except OSError:\n pass\n\n shutil.move(src,dst)", "def copyfile(source, dest, newname=None):\n\n if not os.path.exists(source):\n #print 'no such 
file %s' %source\n return False\n shutil.copy(source, newname)\n dest = os.path.join(dest, newname)\n if os.path.exists(dest):\n os.remove(dest)\n shutil.move(newname, dest)\n return True", "def _put(self, src_fname, dst_fname):\n logging.info('Transferring file %s to %s', src_fname, self._ip_addr)\n sftp_cli = self._get_sftp_client()\n if sftp_cli is None:\n raise Exception('Not supported without ssh.')\n return sftp_cli.put(src_fname, dst_fname)", "def test_change_file_to_dir_without_file(self):\n dir0, dir1 = self.make_temp_dirs(2)\n self.write_file(dir0, \"foo\", \"bar\")\n self.sync_all()\n self.assertFile(dir0, \"foo\", \"bar\")\n self.assertFile(dir1, \"foo\", \"bar\")\n\n self.delete_file(dir0, \"foo\")\n self.write_dir(dir0, \"foo\")\n self.sync_all()\n self.assertDirPresent(dir0, \"foo\")\n self.assertDirPresent(dir1, \"foo\")", "def moveFile(source, dest):\n try:\n shutil.move(source, dest) \n except IOError as e:\n print (\"Unable to move file. %s\" %(e))", "def transfer_files(src: str, dst: str, move_src_data: bool = False):\n if move_src_data:\n logger.info('Move {0} to {1}'.format(src, dst))\n shutil.move(src, dst)\n else:\n logger.info('Copy {0} to {1}'.format(src, dst))\n copy_tree(src, dst)", "def test_move_goodtgzfile(self):\n dbf = self.createDummyDBF('goodtar.tgz')\n\n real_ans = (os.path.join(self.td, 'goodtar.tgz'),\n os.path.join(self.td, 'L1', 'goodtar.tgz'))\n self.assertFalse(os.path.isdir(os.path.join(self.td, 'L1')))\n self.assertEqual(real_ans, dbf.move())\n self.assertTrue(os.path.isdir(os.path.join(self.td, 'L1')))\n # Verify that archive was expanded\n self.assertTrue(os.path.isfile(os.path.join(self.td, 'tar1.txt')))\n self.assertTrue(os.path.isfile(os.path.join(self.td, 'tar2.txt')))", "def test_move_file_new_workspace_without_download(self, mock_message, mock_delete, mock_upload, mock_download, mock_paths):\n\n volume_path = os.path.join('the', 'volume', 'path')\n file_path_1 = os.path.join('my_dir', 'my_file.txt')\n file_path_2 = os.path.join('my_dir', 'my_file.json')\n full_path_file_1 = os.path.join(volume_path, file_path_1)\n full_path_file_2 = os.path.join(volume_path, file_path_2)\n\n file_1 = storage_test_utils.create_file(file_path=file_path_1, workspace=self.old_workspace)\n file_2 = storage_test_utils.create_file(file_path=file_path_2, workspace=self.old_workspace)\n file_ids = [file_1.id, file_2.id]\n\n # Call function\n move_files(file_ids, new_workspace=self.new_workspace, new_file_path=None)\n\n # Check results\n mock_download.assert_not_called()\n mock_upload.assert_called()\n mock_delete.assert_called()", "def replace(self):\n if self.success is False:\n raise TaskError('not ready')\n try:\n temp_src = '/tmp/' + str(random.randint(10000, 99999)) + '.mp3'\n os.move(self.source, temp_src)\n os.move(self.target, self.source)\n os.unlink(temp_src)\n except OSError as e:\n print(e)", "def move_file(source, destination):\n shutil.move(source, destination)" ]
[ "0.704788", "0.69124776", "0.68739474", "0.668043", "0.6659431", "0.6645383", "0.6571377", "0.65401155", "0.6499919", "0.64861435", "0.6444862", "0.643904", "0.6436059", "0.6417866", "0.6415396", "0.6408028", "0.6362181", "0.6331619", "0.63206327", "0.63206327", "0.6296442", "0.6295506", "0.6267673", "0.6261122", "0.6252751", "0.6249293", "0.62168473", "0.6209471", "0.6202359", "0.6201002" ]
0.76052755
0
Test case for downloading a remote zip file and extracting its contents.
def testZipUrl(self):
        try:
            remoteLocator = self.__zipFileUrl
            # fn = self.__fileU.getFileName(remoteLocator)
            ok = self.__fileU.isLocal(remoteLocator)
            self.assertFalse(ok)
            #
            lPath = os.path.join(self.__workPath, self.__fileU.getFileName(self.__zipFileUrl))
            ok = self.__fileU.get(remoteLocator, lPath)
            self.assertTrue(ok)
            ok = self.__fileU.exists(lPath)
            self.assertTrue(ok)
            ok = self.__fileU.isLocal(lPath)
            self.assertTrue(ok)
            tPath = self.__fileU.getFilePath(lPath)
            self.assertEqual(lPath, tPath)
            fp = self.__fileU.uncompress(lPath, outputDir=self.__workPath)
            ok = fp.endswith("Food_Display_Table.xlsx")
            self.assertTrue(ok)
        except Exception as e:
            logger.exception("Failing with %s", str(e))
            self.fail()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(file_url, file_path):\n\n # extract file from the link\n\n if not os.path.exists(file_path):\n os.makedirs(file_path, exist_ok=True)\n \n r = requests.get(str(file_url))\n\n #unzip the zip file\n z = zipfile.ZipFile(io.BytesIO(r.content))\n z.extractall(path = file_path)", "def test_result_file_path_get(self):\n headers = { \n 'Accept': 'application/zip',\n }\n response = self.client.open(\n '/v1/result/{file_path}'.format(file_path='file_path_example'),\n method='GET',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def _download(data_folder): # pragma: no cover\n\n logger.info(f\"Downloading {SOURCE_URL}.\")\n\n with urlopen(SOURCE_URL) as zipresp:\n with zipfile.ZipFile(io.BytesIO(zipresp.read())) as zfile:\n zfile.extractall(data_folder)", "def _maybe_download_and_extract(self, filename):\n if not os.path.exists(self.work_dir):\n os.mkdir(self.work_dir)\n filepath = os.path.join(self.work_dir, filename)\n if not os.path.exists(filepath):\n filepath, _ = urllib.urlretrieve(self.url + filename, filepath)\n statinfo = os.stat(filepath)\n log.info('Successfully downloaded', filename, statinfo.st_size,\n 'bytes.')\n log.info('Extracting zip file ... ')\n f = zipfile.ZipFile(filepath)\n f.extractall(path=self.work_dir)\n log.info('Extraction finished ... ')", "def download_and_unzip(url, zip_path, csv_path, data_folder):\n\n download_from_url(url, zip_path)\n\n unzip(zip_path, csv_path, data_folder)\n\n print('Done.')", "def download():\n response = requests.get(URL, stream=True)\n\n file = open(FILE_NAME, 'wb')\n file.write(response.content)\n\n with zipfile.ZipFile(FILE_NAME, 'r') as zip_ref:\n zip_ref.extractall()\n\n file.close()\n os.remove(FILE_NAME)", "def test_download_and_unlink(self):\n scrape_category.get_simfile_from_ziv(self.simfile, self.link, self.dest)\n assert os.path.exists(os.path.join(self.dest, \"sim100.zip\"))\n\n scrape_category.unlink_zip(self.simfile, self.dest)\n assert not os.path.exists(os.path.join(self.dest, \"sim100.zip\"))", "def download_zip_file(zip_remote, save_dir, force_overwrite, cleanup=False):\n zip_download_path = download_from_remote(zip_remote, save_dir, force_overwrite)\n unzip(zip_download_path, cleanup=cleanup)", "def getzip(url, zipfile, unzipdir):\n done_file = os.path.join(unzipdir, '.'+os.path.basename(zipfile)+'.done')\n if file_exists(done_file):\n print('{} already downloaded and extracted; skipping. 
To reinstall \"rm {}\"'.format(os.path.basename(zipfile), done_file))\n else:\n print('Downloading {} as {}.'.format(url, zipfile))\n urlretrieve(url, zipfile)\n print('Extracting {} into {}.'.format(zipfile, unzipdir))\n with ZipFile(zipfile, 'r') as zip:\n zip.extractall(unzipdir)\n os.remove(zipfile)\n with open(done_file, 'w'):\n pass", "def fetch(data_dir):\n file_path = os.path.join(data_dir, DESTINATION, ZIP_NAME)\n result_path = os.path.join(data_dir, DESTINATION, NAME)\n return utils.fetch(URL, file_path, result_path)", "def _download_zip(self, zip_url, dest_dir):\n # TODO(jsirois): Wrap with workunits, progress meters, checksums.\n self.context.log.info('Downloading {}...'.format(zip_url))\n sess = requests.session()\n sess.mount('file://', self.LocalFileAdapter())\n res = sess.get(zip_url)\n if not res.status_code == requests.codes.ok:\n raise TaskError('Failed to download {} ({} error)'.format(zip_url, res.status_code))\n\n with open_zip(BytesIO(res.content)) as zfile:\n safe_mkdir(dest_dir)\n for info in zfile.infolist():\n if info.filename.endswith('/'):\n # Skip directories.\n continue\n # Strip zip directory name from files.\n filename = os.path.relpath(info.filename, get_basedir(info.filename))\n f = safe_open(os.path.join(dest_dir, filename), 'w')\n f.write(zfile.read(info))\n f.close()", "def _download(self):\n self._system.download_file(\"http://curl.haxx.se/download/\" + self._tar_name)", "def download():\n try:\n cli.run(\n [URL, '--output', TEMP_DIR],\n )\n except SystemExit:\n return None", "def fetch_zenodo(self):\n\n # retrieve content from URL\n try:\n logging.info(f\"Downloading example data from {self.url}\")\n r = requests.get(self.url, stream=True)\n with io.BytesIO() as stream:\n with tqdm.wrapattr(\n stream,\n 'write',\n file=sys.stdout,\n miniters=1,\n desc=self.url,\n total=int(r.headers.get('content-length', 0))\n ) as file:\n for chunk in r.iter_content(chunk_size=4096):\n file.write(chunk)\n with zipfile.ZipFile(stream) as zipped:\n # extract each file in the zipped dir to the project\n for f in zipped.namelist():\n logging.info(\"Unzipped: {}\".format(os.path.join(self.destination, f)))\n zipped.extract(f, self.destination)\n\n logging.info(\"Download and install complete.\")\n\n self.close_logger()\n\n except requests.exceptions.MissingSchema:\n msg = f\"Unable to download data from {self.url}\"\n logging.exception(msg)\n self.close_logger()\n raise", "def download_extract_zip(url):\n response = requests.get(url)\n with ZipFile(BytesIO(response.content)) as thezip:\n for zipinfo in thezip.infolist():\n with thezip.open(zipinfo) as thefile:\n df = pd.read_csv(thefile)\n return (df)", "def fetch_the_data():\n subprocess.run([\"wget\", \"https://storage.googleapis.com/recipe-box/recipes_raw.zip\"])\n subprocess.run([\"unzip\", \"recipes_raw.zip\", \"-d\", RECIPES_DIRPATH])\n subprocess.run([\"rm\", \"recipes_raw.zip\"])", "def fetch(self) -> None:\n archive_path = os.path.join(self._output_dir, self._archive_name)\n self._download_file(self._parsed_url.original_url, archive_path)\n try:\n with zipfile.ZipFile(archive_path, \"r\") as zip_file:\n zip_file.extractall(path=self._output_dir)\n except zipfile.BadZipfile:\n raise REANAFetcherError(\"The provided zip file is not valid\")\n\n os.remove(archive_path)\n\n if not self._discover_workflow_specs():\n top_level_entries = [\n os.path.join(self._output_dir, entry)\n for entry in os.listdir(self._output_dir)\n ]\n # Some zip archives contain a single directory with all the files.\n if 
len(top_level_entries) == 1 and os.path.isdir(top_level_entries[0]):\n top_level_dir = top_level_entries[0]\n # Move all entries inside the top level directory\n # to the output directory.\n for entry in os.listdir(top_level_dir):\n shutil.move(os.path.join(top_level_dir, entry), self._output_dir)\n os.rmdir(top_level_dir)", "def _download(self, url, rel_path):\n \n tmp_dir = \"TMP_DIR=`mktemp -d`;\"\n wget_cmd = [ tmp_dir, \"wget\", \"-nv\", \"-O\", \"$TMP_DIR/archive.tgz\", url, \";\" ]\n wget_cmd = ' '.join(wget_cmd)\n \n mkdir_cmd = \"mkdir -p %s ;\" % (\"./remote_resources/\" + rel_path)\n \n cleandir_cmd = \"rm -Rf %s/* ;\" % (\"./remote_resources/\" + rel_path)\n \n untar_cmd = [ \"tar\", \"xf\", \"$TMP_DIR/archive.tgz\", \"-C\", \"./remote_resources/%s\" % rel_path, \";\" ]\n untar_cmd = ' '.join(untar_cmd)\n \n remove_cmd = \"rm -Rf $TMP_DIR;\"\n \n return self._ssh(' '.join([ wget_cmd, mkdir_cmd, cleandir_cmd, untar_cmd, remove_cmd ]))", "def download (httpfile, path_unzip = None, outfile = None) :\n if path_unzip is None : path_unzip = GetPath ()\n file = _check_source (httpfile, path_unzip = path_unzip, outfile = outfile)\n return file", "def test_download(self):\n pass", "def grab_file(url, filename):\n with RemoteZip(url) as zip:\n filenames = zip.namelist()\n for fname in filenames:\n zinfo = zip.getinfo(fname)\n if filename in zinfo.filename and not \".plist\" in zinfo.filename:\n filename = zinfo.filename.split(\"/\")[-1]\n print(\"[i] downloading %s\" % filename)\n extract_and_clean(zip, zinfo.filename, filename)\n return filename\n return filename", "def download_data():\n url = 'https://www.dropbox.com/s/h9ubx22ftdkyvd5/ml-latest-small.zip?dl=1'\n urllib.request.urlretrieve(url, 'ml-latest-small.zip')\n zfile = zipfile.ZipFile('ml-latest-small.zip')\n zfile.extractall()\n zfile.close()", "def pull_zip_file(bucket, client, source, file_name, version):\n\n version_details = {\"version_id\": None, \"last_modified\": None}\n\n try:\n if version is None:\n\n response = client.list_object_versions(\n Bucket=bucket, Prefix=f\"{source}/{file_name}\"\n )\n version_details[\"version_id\"] = [\n x[\"VersionId\"] for x in response[\"Versions\"] if x[\"IsLatest\"]\n ][0]\n last_modified = [\n x[\"LastModified\"] for x in response[\"Versions\"] if x[\"IsLatest\"]\n ][0]\n version_details[\"last_modified\"] = datetime.strftime(\n last_modified, \"%Y-%m-%d %H:%M:%S\"\n )\n\n client.download_file(bucket, f\"{source}/{file_name}\", file_name)\n\n else:\n head = client.head_object(Bucket=bucket, Key=f\"{source}/{file_name}\")\n version_details[\"version_id\"] = version\n version_details[\"last_modified\"] = datetime.strftime(\n head[\"LastModified\"], \"%Y-%m-%d %H:%M:%s\"\n )\n client.download_file(\n bucket,\n f\"{source}/{file_name}\",\n file_name,\n ExtraArgs={\"VersionId\": version},\n )\n\n except ClientError as e:\n logging.error(e)\n\n return (False, version_details)\n print(\n f\"Downloaded {file_name.split('/')[-1]} version {version_details['version_id']} last modified {version_details['last_modified']}\"\n )\n\n return (True, version_details)", "def download_and_unzip(url, extract_to='.'):\n http_response = urlopen(url)\n zipfile = ZipFile(BytesIO(http_response.read()))\n zipfile.extractall(path=extract_to)", "def test_unzip_file(self):\n\n # Path to the compressed file\n zipped_file = os.path.join(self.xmlfilepath, \"DLTINS_20210117_01of01.zip\")\n # Test for correct data\n # NOTE : For this test case to pass the source xml zipped file\n # should be present in the download 
path\n self.assertTrue(unzip_file(zipped_file, self.xmlfilepath))\n\n # Test for wrong target path\n self.assertFalse(unzip_file(zipped_file, r\"D:\\kqcA CK j \"))\n\n # Test for incorrect compressed file\n self.assertFalse(unzip_file(\"D:\\somerandomfile\", self.xmlfilepath))", "def download_file(self, remote_file):\n remote_file.download()", "def download_and_unzip_data(url, destination, prefix='state-'):\n # make sure destination exists or create a temporary directory\n if not destination:\n destination = tempfile.mkdtemp(prefix=prefix)\n logger.debug(\"Created temp directory {}\".format(destination))\n else:\n if not os.path.exists(destination):\n os.makedirs(destination)\n logger.info(\"Created {}\".format(destination))\n zip_filename = get_zipfile_path(url, destination)\n # don't re-download data if raw data file already exists\n if os.path.exists(zip_filename):\n logger.debug(\"{} exists, skipping download\".format(zip_filename))\n else:\n logger.debug(\"Downloading data to {}\".format(zip_filename))\n response = requests.get(url, stream=True)\n # XXX check status code here; e.g., if permissions haven't been granted\n # for a file being downloaded from S3 a 403 will be returned\n content_length = int(response.headers.get('content-length'))\n start = time.clock()\n downloaded = 0\n with open(zip_filename, 'wb') as f:\n for chunk in response.iter_content(chunk_size=1024):\n if chunk:\n downloaded += len(chunk)\n now = time.clock()\n if (now - start) >= 5:\n logger.debug('{0:.2g}% downloaded'.format(downloaded/content_length*100))\n start = now\n f.write(chunk)\n f.flush()\n logger.debug('100% downloaded')\n\n unzip_data(destination, url=url)\n return destination", "def fetch(self, url) -> bytes:\n buffer = self.download(url)\n zfs = ZipFileSystem(buffer, \"r\")\n return zfs.open(zfs.glob(\"*\")[0]).read()", "def download_data():\r\n print('Downloading cifar-10 data...')\r\n request.urlretrieve(dataurl)\r\n print('Done')\r\n print('Please unzip files. command is:')\r\n print('gzip -d cifar-10-python.tar.gz')\r\n print('tar -xf cifar-10-python.tar')\r\n exit()", "def _download(url, outpath=None, dirname=None, branch='master', release=None):\n six.print_('downloading...')\n outfolder = outpath or os.getcwd()\n file, archive_url = get_archive_url(url, branch, release)\n six.print_(archive_url)\n if dirname:\n outfolder = \"{}/{}.zip\".format(outfolder, dirname)\n return file, wget.download(archive_url, out=outfolder)" ]
[ "0.7220446", "0.67840797", "0.6767943", "0.67551804", "0.6672871", "0.6658233", "0.6643665", "0.66261744", "0.6577798", "0.65612066", "0.6509779", "0.6480301", "0.6463164", "0.64614177", "0.6453328", "0.6448785", "0.64273727", "0.63947684", "0.6382506", "0.6374403", "0.6358071", "0.633058", "0.6305545", "0.62759227", "0.6245397", "0.6232128", "0.62302727", "0.6209603", "0.619062", "0.6183703" ]
0.7367263
0
Test case for downloading a remote file via the FTP protocol and extracting its contents.
def testFtpUrl(self):
        try:
            remoteLocator = self.__ftpFileUrl
            # fn = self.__fileU.getFileName(remoteLocator)
            ok = self.__fileU.isLocal(remoteLocator)
            self.assertFalse(ok)
            #
            dirPath = os.path.join(self.__workPath, "chem_comp_models")
            lPath = os.path.join(dirPath, self.__fileU.getFileName(self.__ftpFileUrl))
            ok = self.__fileU.get(remoteLocator, lPath)
            self.assertTrue(ok)
            ok = self.__fileU.exists(lPath)
            self.assertTrue(ok)
            ok = self.__fileU.isLocal(lPath)
            self.assertTrue(ok)
            tPath = self.__fileU.getFilePath(lPath)
            self.assertEqual(lPath, tPath)
            fp = self.__fileU.uncompress(lPath, outputDir=dirPath)
            ok = fp.endswith("chem_comp_model.cif")
            self.assertTrue(ok)
        except Exception as e:
            logger.exception("Failing with %s", str(e))
            self.fail()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_download(ftp):\n # Active (PORT), Passive (PASV), ExtActive (EPRT), or ExtPassive (EPSV)?\n output, sock, transfer_type = get_transfer_output_and_socket(ftp)\n print_debug(output + \"\\n\")\n\n # What file to download?\n path = raw_input(\"What file do you want to download?\\n> \")\n while not path:\n path = raw_input(\"What file do you want to download?\\n> \")\n try:\n msg_rec, data_rec = ftp.retr_cmd(sock, path, transfer_type)\n print_debug(str(msg_rec))\n except Exception as e:\n print(\"An error has occurred: \" + str(e) + \"\\nPlease try again.\")\n return main_menu(ftp)\n\n # Download file.\n if data_rec:\n print_debug(str(data_rec))\n try:\n write_to_local(path, data_rec)\n except Exception as e:\n print(\"An error has occurred: \" + str(e) + \"\\nPlease try again.\")\n return main_menu(ftp)\n main_menu(ftp)", "def download_all_ftp(download_dir, file_match, ftp_host, ftp_login, \r\n ftp_passwd, ftp_directory, max_wait=60):\r\n if max_wait < 0:\r\n max_wait = 0\r\n \r\n remove_old_ftp_downloads(download_dir)\r\n #open the file for writing in binary mode\r\n all_files_downloaded = []\r\n print 'Opening local file'\r\n time_start_connect_attempt = datetime.datetime.utcnow()\r\n request_incomplete = True\r\n ftp_exception = \"FTP Request Incomplete\"\r\n attempt_count = 1\r\n while ((datetime.datetime.utcnow()-time_start_connect_attempt)<datetime.timedelta(minutes=max_wait) \\\r\n or attempt_count == 1) and request_incomplete:\r\n try:\r\n #init FTPClient (moved here because of traffic issues)\r\n ftp_client = PyFTPclient(host=ftp_host,\r\n login=ftp_login,\r\n passwd=ftp_passwd,\r\n directory=ftp_directory)\r\n ftp_client.connect()\r\n file_list = ftp_client.ftp.nlst(file_match)\r\n ftp_client.ftp.quit()\r\n #if there is a file list and the request completed, it is a success\r\n if file_list:\r\n for dst_filename in file_list:\r\n local_path = os.path.join(download_dir, dst_filename)\r\n local_dir = local_path[:-1*len(FileExtension(local_path))-1]\r\n #download and unzip file\r\n try:\r\n #download from ftp site\r\n unzip_file = False\r\n if not os.path.exists(local_path) and not os.path.exists(local_dir):\r\n print \"Downloading from ftp site: \" + dst_filename\r\n unzip_file = ftp_client.download_file(dst_filename, local_path)\r\n else:\r\n print dst_filename + ' already exists. Skipping download ...'\r\n #extract from tar.gz\r\n if unzip_file:\r\n\t\t\t print \"Extracting: \" + dst_filename\r\n ExtractNested(local_path, True)\r\n #add successfully downloaded file to list\r\n all_files_downloaded.append(local_dir)\r\n #request successful when one file downloaded and extracted \r\n request_incomplete = False\r\n else:\r\n print dst_filename + ' already extracted. Skipping extraction ...'\r\n except Exception as ex:\r\n print ex\r\n if os.path.exists(local_path):\r\n os.remove(local_path)\r\n continue\r\n \r\n except Exception as ex:\r\n ftp_exception = ex\r\n pass\r\n \r\n if request_incomplete:\r\n print \"Attempt\", attempt_count, \"failed ...\"\r\n attempt_count += 1\r\n if max_wait > 0:\r\n sleep_time = 5.1\r\n if max_wait < 5.1:\r\n sleep_time = max(max_wait, 0.1)\r\n print \"Sleeping for\", (sleep_time-0.1), \"minutes and trying again ...\"\r\n time.sleep((sleep_time-0.1)*60)\r\n \r\n \r\n \r\n if request_incomplete:\r\n print \"Maximum wait time of\", max_wait, \"minutes exeeded and request still failed. 
Quitting ...\"\r\n raise Exception(ftp_exception)\r\n \r\n print \"All downloads completed!\"\r\n return all_files_downloaded", "def ftp():\n pass", "def _ftp_download(self, host: str, data_dir: str, fn: str,\n source_dir: Path,\n data_fn: str) -> Optional[str]:\n with FTP(host) as ftp:\n ftp.login()\n timestamp = ftp.voidcmd(f'MDTM {data_dir}{data_fn}')[4:].strip()\n date = str(parser.parse(timestamp)).split()[0]\n version = \\\n datetime.datetime.strptime(date, '%Y-%m-%d').strftime('%Y%m%d')\n ftp.cwd(data_dir)\n if data_fn.endswith('.gz'):\n filepath = source_dir / f'{fn}.gz'\n else:\n filepath = source_dir / fn\n with open(filepath, 'wb') as fp:\n ftp.retrbinary(f'RETR {data_fn}', fp.write)\n if data_fn.endswith('.gz'):\n with gzip.open(filepath, 'rb') as f_in:\n with open(source_dir / fn, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n remove(filepath)\n return version", "def test_download(self):\n pass", "def test_download_host(self):\n pass", "def get_file(self):\n while not (self.is_connection_working()):\n print('Connection is not working. Reason should be printed above. Sleeping 5 minutes and retrying.')\n time.sleep(300)\n i = 0\n while True:\n if i >= 3:\n print('Looks like file {} is really not on FTP. Skipping.'.format(self.url))\n return\n if self.file_exists_on_ftp():\n with closing(request.urlopen(self.url, )) as r:\n with open(self.save_filepath, 'wb') as f:\n shutil.copyfileobj(r, f)\n if i > 0:\n print('Download succeeded on attempt {}'.format(i+1))\n return\n else:\n print(\n 'requests.urlopen error. This sometimes means that file {} \"not exists\" on FTP '\n 'but sometimes it is just \"erruption on the Sun\" and file is downloaded on second attempt. '\n 'Sleeping 1 minute and retrying download. Retry will be done {} more times'.format(self.url,\n 3 - (i + 1)))\n time.sleep(60)\n i += 1\n continue\n # print('WARNING: Connection is OK, but system was not able to get file. 
Skipping.')", "def test_download2(self):\n pass", "def get_ftp_file(\n url: str,\n download_fn: str,\n days_old: int = settings.UPDATE_CYCLE_DAYS,\n force_download: bool = False,\n) -> Tuple[bool, str]:\n\n p = urlparse(url)\n host = p.hostname\n path_str = p.path\n path_obj = pathlib.Path(path_str)\n path_dir = path_obj.parent\n filename = path_obj.name\n\n compress_flag = False\n if not filename.endswith(\".gz\"):\n compress_flag = True\n\n local_fn_date = \"19000101\"\n if os.path.exists(download_fn):\n modtime_ts = os.path.getmtime(download_fn)\n local_fn_date = timestamp_to_date(modtime_ts)\n\n # Only download file if it's newer than what is saved\n rmod_date = \"19010101\"\n\n ftp = ftplib.FTP(host=host)\n try:\n ftp.login()\n\n ftp.cwd(str(path_dir))\n reply = str(ftp.sendcmd(\"MDTM \" + filename)).split()\n reply_code = int(reply[0])\n if (\n reply_code == 213\n ): # 213 code denotes a successful usage of MDTM, and is followed by the timestamp\n remote_mod_date = reply[1][\n :8\n ] # we only need the first 8 digits of timestamp: YYYYMMDD - discard HHMMSS\n\n if local_fn_date >= remote_mod_date and not force_download:\n changed = False\n return (changed, \"Remote file is not newer than local file\")\n\n if compress_flag:\n file_open_fn = gzip.open\n else:\n file_open_fn = open\n\n # Retrieve and save file\n if compress_flag:\n with gzip.open(download_fn, \"wb\") as f:\n ftp.retrbinary(f\"RETR {filename}\", f.write)\n else:\n with open(download_fn, \"wb\") as f:\n ftp.retrbinary(f\"RETR {filename}\", f.write)\n\n msg = \"Downloaded file\"\n changed = True\n return (changed, msg)\n\n except Exception as e:\n now = datetime.datetime.now()\n check_date = (now - datetime.timedelta(days=days_old)).strftime(\"%Y%m%d\")\n\n if local_fn_date > check_date:\n changed = False\n return (\n changed,\n f\"{download_fn} < week old - won't retrieve, filemod date unavailable\",\n )\n else:\n changed = False\n msg = f\"Could not download file: {str(e)}\"\n return (changed, msg)\n\n finally:\n ftp.quit()", "def download(self,filename,localfolder):\n\n self.status = 0\n\n\n if not(filename in self.fileList):\n print('filename:%s not exists'%filename)\n self.status = 1\n return self.status\n\n newfilename = os.path.join(localfolder,filename)\n\n self.file = open(newfilename, 'wb')\n\n try:\n print('Download: ' + filename)\n self.ftp.retrbinary('RETR ' + filename, self.__handleDownload)\n print('Download Complete')\n except ftplib.all_errors:\n print('Error Downloading ' + filename)\n self.status = 1\n return self.status\n\n self.file.close()\n\n return self.status", "def test_download1(self):\n pass", "def _execute(self):\n print(\"[ -ENGINE- ] Executing FTP Download ..\")\n # self.time_point(tag = 'execution')\n main = self.import_engine_as_python_function()\n downloaded_files = main(\n ftp_url=self.params.get(\"ftp_url\", None),\n folder=self.params.get(\"ftp_folder\", None),\n login=self.params.get(\"ftp_login\", None),\n password=self.params.get(\"ftp_password\", None),\n include_ext=self.params.get(\"ftp_include_ext\", None),\n output_folder=self.params.get(\"ftp_output_folder\", None),\n max_number_of_files=self.params.get(\"ftp_max_number_of_files\", None),\n blocksize=self.params.get(\"ftp_blocksize\", None),\n )\n # self.print_execution_time(tag='execution')\n self.io[\"output\"][\"finfo\"][\"dir\"] = os.path.dirname(downloaded_files[-1])\n self.io[\"output\"][\"finfo\"][\"file\"] = os.path.basename(downloaded_files[-1])\n return", "def __init__(self,server = None, username=None, password=None, 
remotefolder=None):\n\n if ((server == None) and (username==None) and (password==None) and (remotefolder==None)):\n server, username, password, remotefolder = self.parmsByDefault()\n\n self.server = server\n self.username = username\n self.password = password\n self.remotefolder = remotefolder\n self.file = None\n self.ftp = None\n self.status = 0\n\n try:\n self.ftp = ftplib.FTP(self.server)\n self.ftp.login(self.username,self.password)\n self.ftp.cwd(self.remotefolder) \n # print 'Connect to FTP Server: Successfully'\n \n except ftplib.all_errors:\n print('Error FTP Service')\n self.status = 1\n return\n\n\n\n self.dirList = []\n\n try:\n self.dirList = self.ftp.nlst()\n\n except ftplib.error_perm as resp:\n if str(resp) == \"550 No files found\":\n print(\"no files in this directory\")\n self.status = 1\n return\n\n except ftplib.all_errors:\n print('Error Displaying Dir-Files')\n self.status = 1\n return\n\n self.fileList = []\n self.folderList = []\n #only for test\n for f in self.dirList:\n name, ext = os.path.splitext(f)\n if ext != '':\n self.fileList.append(f)", "def test_retrieve_files_all(self):\n os.makedirs('/tmp/remote_pacha/localhost/etc')\n os.mkdir('/tmp/remote_pacha/localhost/home')\n remote_file = open('/tmp/remote_pacha/localhost/etc/etc.conf', 'w')\n remote_file.write(\"remote second file\")\n remote_file.close()\n remote_file = open('/tmp/remote_pacha/localhost/home/home.conf', 'w')\n remote_file.write(\"remote file\")\n remote_file.close()\n server = \"%s@%s\" % (self.username, host.hostname()) \n run = rebuild.Rebuild(server=server,\n hostname='localhost', \n source='/tmp/remote_pacha')\n run.retrieve_files()\n result_1 = os.path.isfile('/tmp/localhost/etc/etc.conf')\n result_2 = os.path.isfile('/tmp/localhost/home/home.conf')\n line = open('/tmp/localhost/etc/etc.conf')\n remote_line = line.readline()\n self.assertEqual(remote_line, \"remote second file\")\n self.assertTrue(result_2)\n self.assertTrue(result_1)", "def test_retrieve_files_single(self):\n os.makedirs('/tmp/remote_pacha/localhost/another_dir')\n os.makedirs('/tmp/remote_pacha/localhost/single_dir')\n remote_file = open('/tmp/remote_pacha/localhost/single_dir/remote.txt', 'w')\n remote_file.write(\"remote file\")\n remote_file.close()\n self.assertTrue(os.path.isfile('/tmp/remote_pacha/localhost/single_dir/remote.txt'))\n server = \"%s@%s\" % (self.username, host.hostname()) \n run = rebuild.Rebuild(server=server,\n hostname='localhost', \n source='/tmp/remote_pacha',\n directory='single_dir')\n run.retrieve_files()\n result = os.path.isfile('/tmp/localhost/single_dir/remote.txt')\n line = open('/tmp/localhost/single_dir/remote.txt')\n remote_line = line.readline()\n self.assertEqual(remote_line, \"remote file\")\n self.assertTrue(result)", "def __call__(self, url, output_file, pooch):\n\n parsed_url = parse_url(url)\n ftp = ftplib.FTP(timeout=self.timeout)\n ftp.connect(host=parsed_url[\"netloc\"], port=self.port)\n ispath = not hasattr(output_file, \"write\")\n if ispath:\n output_file = open(output_file, \"w+b\")\n try:\n ftp.login(user=self.username, passwd=self.password, acct=self.account)\n command = f\"RETR {parsed_url['path']}\"\n if self.progressbar:\n # Make sure the file is set to binary mode, otherwise we can't\n # get the file size. 
See: https://stackoverflow.com/a/22093848\n ftp.voidcmd(\"TYPE I\")\n size = int(ftp.size(parsed_url[\"path\"]))\n use_ascii = bool(sys.platform == \"win32\")\n progress = tqdm(\n total=size,\n ncols=79,\n ascii=use_ascii,\n unit=\"B\",\n unit_scale=True,\n leave=True,\n )\n with progress:\n\n def callback(data):\n \"Update the progress bar and write to output\"\n progress.update(len(data))\n output_file.write(data)\n\n ftp.retrbinary(command, callback, blocksize=self.chunk_size)\n else:\n ftp.retrbinary(command, output_file.write, blocksize=self.chunk_size)\n finally:\n ftp.quit()\n if ispath:\n output_file.close()", "def download_file(self, remote_file):\n remote_file.download()", "def handle_file_retrieval(self, msg):\n Logger.info(\"Slave: Retrieving files\")\n params = msg.get_field(MessageKeys.params_key)\n host = msg.get_field(MessageKeys.sender_key)\n port = params[MessageKeys.ftp_port_key]\n subpath = params[MessageKeys.ftp_subpath_key]\n self.presentation.set_files(params[MessageKeys.presentation_content_key])\n self.presentation.reset()\n self.layout.init_presentation()\n self.retrieve_files_over_ftp(host, port, subpath)\n self.presentation_ended = False\n return self.create_response(msg.get_command())", "def test_download(self):\n\n # Test for all correct data\n self.assertEqual(\n download(TestSteelEye.url, self.xmlfilepath, \"sourcefile.xml\"),\n self.xmlfilepath + os.sep + \"sourcefile.xml\",\n )\n\n # Test for incorrect url\n self.assertEqual(\n download(\"http://example.com\", self.xmlfilepath, \"sourcefile.xml\"), \"\"\n )\n\n # Test for different download path\n self.assertEqual(\n download(\n TestSteelEye.url,\n os.path.join(os.getcwd(), \"anotherpath\"),\n \"sourcefile.xml\",\n ),\n os.path.join(os.getcwd(), \"anotherpath\") + os.sep + \"sourcefile.xml\",\n )\n\n # Test for incorrect download path\n self.assertEqual(download(TestSteelEye.url, \"E:\", \"sourcefile.xml\"), \"\")", "def download(self, item, tamanho, destino):\n down = DownloadFtp(item, tamanho, destino)\n self.ftp.retrbinary('RETR %s' % item, down.grava_dados, 65536)\n down.close()", "def do_ftp(ftp):\n login(ftp)\n main_menu(ftp)", "def download_ftp(dst_filename, local_path, ftp_host, ftp_login, \r\n ftp_passwd, ftp_directory):\r\n file = open(local_path, 'wb')\r\n print 'Reconnecting ...'\r\n handle = ftp_connect(ftp_host, ftp_login, \r\n ftp_passwd, ftp_directory)\r\n handle.voidcmd('TYPE I')\r\n dst_filesize = handle.size(dst_filename)\r\n attempts_left = 15\r\n while dst_filesize > file.tell():\r\n try:\r\n if file.tell() == 0:\r\n res = handle.retrbinary('RETR %s' % dst_filename, file.write)\r\n else:\r\n # retrieve file from position where we were disconnected\r\n handle.retrbinary('RETR %s' % dst_filename, file.write, rest=file.tell())\r\n except Exception as ex:\r\n print ex\r\n if attempts_left == 0:\r\n print \"Max number of attempts reached. Download stopped.\"\r\n handle.quit()\r\n file.close()\r\n os.remove(local_path)\r\n return False\r\n print 'Waiting 30 sec...'\r\n time.sleep(30)\r\n print 'Reconnecting ...'\r\n handle.quit()\r\n handle = ftp_connect(ftp_host, ftp_login, \r\n ftp_passwd, ftp_directory)\r\n print 'Connected. 
' + str(attempts_left) + 'attempt(s) left.'\r\n attempts_left -= 1\r\n handle.quit()\r\n file.close()\r\n return True", "def main(url, localfile):\n ph.download_file(url, localfile)", "def download(host, directory, filename, overwrite=False):\n print(\"\\n- start FTP connection\\n-----------------------\")\n # connect to the FTP\n with ftplib.FTP(host) as ftp_connection:\n\n ftp_connection.login() # log into ftp server\n ftp_connection.cwd(directory) # change dir on ftp server\n print(\"- content of ftp folder: \") # look inside the directory\n print(\"- \", ftp_connection.nlst())\n\n # if overwrite is set to true - overwrite the existing file or create it\n if overwrite:\n print(\"- File %s wird erstellt/überschrieben\\n starte download ...\" % filename)\n # create the file\n with open(filename, \"wb\") as downloadFile:\n # download the content of the server file\n ftp_connection.retrbinary(\"RETR %s/%s\" % (directory, filename), downloadFile.write)\n\n # if overwrite is set to False or default\n else:\n # if the file allready exists in our root dir we dont want to download it\n if os.path.exists(filename):\n print(\"- File %s existiert - skip download\" % filename)\n # if it is not found in the root dir of our local system we want to download it\n else:\n print(\"- File %s existiert NICHT\\n starte download ...\" % filename)\n # create the file\n with open(filename, \"wb\") as downloadFile:\n\n # download the content of the server file\n ftp_connection.retrbinary(\"RETR %s/%s\" % (directory, filename), downloadFile.write)", "def download():\n raise NotImplementedError", "def fetchCATH(filename, ftp_host=None, ftp_path=None, **kwargs):\n if ftp_host == None:\n ftp_host = 'orengoftp.biochem.ucl.ac.uk'\n if ftp_path == None:\n ftp_path = '/cath/releases/daily-release/newest/'\n from ftplib import FTP\n report = kwargs.get('report', True)\n output_folder = kwargs.pop('folder', None)\n ftp_fn = filename\n try:\n ftp = FTP(ftp_host)\n except Exception as error:\n raise type(error)('FTP connection problem, potential reason: '\n 'no internet connectivity')\n else:\n success = 0\n failure = 0\n filenames = []\n ftp.login('')\n \n data = []\n try:\n ftp.cwd(ftp_path)\n ftp.retrbinary('RETR ' + ftp_fn, data.append)\n except Exception as error:\n if ftp_fn in ftp.nlst():\n LOGGER.warn('{0} download failed ({1}). It is '\n 'possible that you do not have rights to '\n 'download .gz files in the current network.'\n .format(ftp_fn, str(error)))\n else:\n LOGGER.warn('{0} download failed. 
{1} does not exist '\n 'on {2}.'.format(ftp_fn, ftp_fn, ftp_host))\n failure += 1\n filenames.append(None)\n else:\n if len(data):\n if output_folder is None:\n output_folder = getcwd()\n filename_full = join(output_folder, ftp_fn)\n\n with open(filename_full, 'w+b') as pdbfile:\n write = pdbfile.write\n [write(block) for block in data]\n\n filename_full = normpath(relpath(filename_full))\n if report: \n LOGGER.debug('{0} downloaded ({1})'\n .format(ftp_fn, sympath(filename_full)))\n success += 1\n filenames.append(filename_full)\n else:\n LOGGER.warn('{0} download failed, reason unknown.'\n .format(ftp_fn))\n failure += 1\n filenames.append(None)\n ftp.quit()", "def ftp_download(url, dir):\n filename = url.split('/')[-1]\n with closing(request.urlopen(url)) as r:\n with open(dir + filename, 'wb+') as f:\n shutil.copyfileobj(r, f)\n return dir + filename", "def test_file_download(self):\n\n # Downloading without auth = unauthorized error (401)\n with self.assertRaises(requests.exceptions.HTTPError):\n self.assertFalse(self.api.downloadFile('/media/part/files/1/test.pdf', 'test.pdf'))", "def download_from_ftp_to_local_SCP(host,port,ftp_path, local_file, mode = 'bin', user = 'root', password = 'root'):\n try:\n scp_obj = SCP(host, port, user, password)\n scp_obj.connect()\n except Exception:\n scp_obj = SCP(host, port, user, password)\n\n if os.path.isdir(local_file):\n print('not support now!')\n else:\n scp_obj.download(local_file, ftp_path, mode)\n scp_obj.close()", "def retrieve_files_over_ftp(self, host, port, subpath):\n write_path = os.path.join(os.getcwd(), PathConstants.MEDIA_FOLDER)\n if not os.path.isdir(write_path):\n os.mkdir(write_path)\n client = RemuFTPClient(host, port, subpath, write_path, self)\n client.connect()" ]
[ "0.6999721", "0.67185104", "0.66923946", "0.66601884", "0.65115726", "0.64415693", "0.63941395", "0.6366749", "0.63455987", "0.630195", "0.62936544", "0.6291365", "0.62748814", "0.62570983", "0.6231759", "0.62223476", "0.619586", "0.61891216", "0.61566657", "0.6152847", "0.61369026", "0.6130865", "0.6125208", "0.60883176", "0.6087514", "0.60848594", "0.6078695", "0.6075257", "0.60667163", "0.6057629" ]
0.70737445
0
Test case for extracting contents from an xz-compressed file.
def testXzFile(self):
        try:
            remoteLocator = self.__xzFile
            fn = self.__fileU.getFileName(remoteLocator)
            lPath = os.path.join(self.__workPath, fn)
            ok = self.__fileU.get(remoteLocator, lPath)
            self.assertTrue(ok)
            ok = self.__fileU.exists(lPath)
            self.assertTrue(ok)
            ok = self.__fileU.isLocal(lPath)
            self.assertTrue(ok)
            tPath = self.__fileU.getFilePath(lPath)
            self.assertEqual(lPath, tPath)
            fp = self.__fileU.uncompress(lPath, outputDir=self.__workPath)
            ok = fp.endswith(".pdb")
            self.assertTrue(ok)
        except Exception as e:
            logger.exception("Failing with %s", str(e))
            self.fail()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_unzip_file(self):\n\n # Path to the compressed file\n zipped_file = os.path.join(self.xmlfilepath, \"DLTINS_20210117_01of01.zip\")\n # Test for correct data\n # NOTE : For this test case to pass the source xml zipped file\n # should be present in the download path\n self.assertTrue(unzip_file(zipped_file, self.xmlfilepath))\n\n # Test for wrong target path\n self.assertFalse(unzip_file(zipped_file, r\"D:\\kqcA CK j \"))\n\n # Test for incorrect compressed file\n self.assertFalse(unzip_file(\"D:\\somerandomfile\", self.xmlfilepath))", "def test_verify_unzip(self):\n assert os.path.exists(\n os.path.join(\n settings.MEDIA_ROOT,\n \"indices\",\n \"test-index\",\n \"data\",\n \"sample.txt\"\n )\n )", "def test_get_file_content(self):\n pass", "def test_get_info_function() -> None:\n current_directory = Path.cwd()\n with zipfile.ZipFile(\n current_directory / 'app' / 'tests' / 'files' / 'oneFile.zip') as zip_object:\n res = get_info_about_file(zip_object, 'dotnetfx.exe')\n assert res == {'path': 'dotnetfx.exe', 'size': 21823560}", "def test_read_file():\n z = XPIManager(get_path('xpi/install_rdf_only.xpi'))\n assert z.read('install.rdf') is not None", "def test__decompress(filename):\n with open(filename, mode=\"rb\") as file_handle:\n name, content = Submit._decompress(filename, file_handle)\n assert name.endswith(\"EcoliCore.xml\")\n assert len(content.read()) >= 494226", "def test_DDSim_getDetectorXML_Local_TarGZ_2( self ):\n gLogger.setLevel(\"ERROR\")\n self.ddsim.detectorModel = \"myDet\"\n self.ddsim.ops.getOptionsDict = Mock( return_value = S_OK( dict(camelot=\"/dev/null\" ) ) )\n self.ddsim.workflow_commons = dict()\n res = self.ddsim._extractZip()\n res = self.ddsim._extractZip()\n gLogger.error( \" res \" , res )\n expectedPath = os.path.join(os.getcwd(), self.ddsim.detectorModel, self.ddsim.detectorModel+\".xml\" )\n self.assertEqual( res['Value'], expectedPath )\n self.assertTrue( os.path.exists( expectedPath ) )", "def read_pex_info_content(entry_point):\r\n if is_compressed(entry_point):\r\n return read_pexinfo_from_zip(entry_point)\r\n else:\r\n return read_pexinfo_from_directory(entry_point)", "def mtz_get_xdata(mtz_filename):\n\n # Get location of shell script\n __location__ = os.path.realpath(\n os.path.join(os.getcwd(), os.path.dirname(__file__))\n )\n mtzinfo_shell = os.path.join(__location__, \"shell_scripts/mtzinfo.sh\")\n\n # Run script and get the standard output or raise an exception\n result = procrunner.run(\n [mtzinfo_shell, mtz_filename], print_stdout=False, timeout=5\n )\n\n # Check that it worked\n assert result[\"exitcode\"] == 0, f\"Error collecting information from {mtz_filename}\"\n assert result[\"stderr\"] == b\"\", f\"Error collecting information from {mtz_filename}\"\n assert (\n result[\"timeout\"] == False\n ), f\"Error collecting information from {mtz_filename}\"\n\n # print(result)\n\n output = str(result[\"stdout\"])\n # print(f\"Output: {output}\")\n\n search_regex = re.compile(\"(?<=XDATA)[ a-z0-9.]+\")\n xdata = search_regex.findall(output)\n # print(xdata)\n\n if len(xdata) > 1:\n print(\n f\"{len(xdata):d} lines of xdata found in {mtz_filename}, using first occurence\"\n )\n\n list_num = xdata[0].split()\n numbers = [float(num) for num in list_num]\n # print(numbers)\n\n return tuple(numbers)", "def test_open_by_name(self):\n self._test_listing_content(ZIPPATH)", "def test_zip_files(self):\n base_zip_files = ['whypython.txt', 'states.dbf', 'cities.kmz']\n\n text_file = os.path.join(os.getcwd(), 'test-data', 'whypython.txt')\n 
dbf_file = os.path.join(os.getcwd(), 'test-data', 'states.dbf')\n kml_file = os.path.join(os.getcwd(), 'test-data', 'cities.kmz')\n #non_file = os.path.join(os.getcwd(), 'test-data', 'emptyfolder')\n self.request['params'][0]['response']['docs'][0]['path'] = text_file\n self.request['params'][0]['response']['docs'][1]['path'] = dbf_file\n self.request['params'][0]['response']['docs'][2]['path'] = kml_file\n #self.request['params'][0]['response']['docs'][3]['path'] = non_file\n __import__(self.request['task'])\n getattr(sys.modules[self.request['task']], \"execute\")(self.request)\n zip_files = zipfile.ZipFile(os.path.join(self.temp_folder, 'output.zip')).namelist()\n self.assertEqual(sorted(zip_files), sorted(base_zip_files))", "def x_unzip(xid=None):\n\t_loadconfig()\n\tnavimport.conf.print_zips()\n\n\txrec = None\n\tif xid == None:\n\t\txrec = _prompt_xid(\"No to unzip >\")\n\telse:\n\t\txrec = navimport.conf.get_xplane_zip_info(0)\n\n\tif xrec != None:\n\t\tprint xrec\n\n\t\ts = \"unzip \"\n\t\ts += \" -d \" + navimport.conf.work_dir(\"/xplane_unzipped/%s\" % xrec['zip_dir'])\n\t\ts += \" \"\n\t\ts += navimport.conf.work_dir(\"/xplane_zips/%s\" % xrec['file_name'])\n\t\tlocal(s)", "def main():\n tmp_dir = xm.constants['dir_tmp']\n exr_f = join(tmp_dir, 'test.exr')\n exr = EXR(exr_f)\n exr.extract_normal(join(tmp_dir, 'test.png'), vis=True)", "def testZipUrl(self):\n try:\n remoteLocator = self.__zipFileUrl\n # fn = self.__fileU.getFileName(remoteLocator)\n ok = self.__fileU.isLocal(remoteLocator)\n self.assertFalse(ok)\n #\n lPath = os.path.join(self.__workPath, self.__fileU.getFileName(self.__zipFileUrl))\n ok = self.__fileU.get(remoteLocator, lPath)\n self.assertTrue(ok)\n ok = self.__fileU.exists(lPath)\n self.assertTrue(ok)\n ok = self.__fileU.isLocal(lPath)\n self.assertTrue(ok)\n tPath = self.__fileU.getFilePath(lPath)\n self.assertEqual(lPath, tPath)\n fp = self.__fileU.uncompress(lPath, outputDir=self.__workPath)\n ok = fp.endswith(\"Food_Display_Table.xlsx\")\n self.assertTrue(ok)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def test_result_file_path_get(self):\n headers = { \n 'Accept': 'application/zip',\n }\n response = self.client.open(\n '/v1/result/{file_path}'.format(file_path='file_path_example'),\n method='GET',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def extract_file(path):", "def extract_zip(dataset_path, target_path):\n dataset_path = os.path.join(dataset_path,'covidx-cxr2.zip')\n print(f'Extracting zip file: {dataset_path}')\n with ZipFile(file=dataset_path) as zip_file:\n for file in tqdm(iterable=zip_file.namelist(), total=len(zip_file.namelist())):\n zip_file.extract(member=file, path=os.path.join(target_path, 'xray'))\n os.remove(dataset_path)", "def test_open_by_named_fobj(self):\n with open(ZIPPATH, 'rb') as f:\n self._test_listing_content(f)", "def testReadFile(self):\n content = archive_parser.Archive.GLOBAL_SIG\n file_name = 'test_file'\n content += file_name + ' ' * (archive_parser.Archive.FILE_ID_LENGTH -\n len(file_name))\n content += ' ' * archive_parser.Archive.FILE_TIMESTAMP_LENGTH\n content += ' ' * archive_parser.Archive.OWNER_ID_LENGTH\n content += ' ' * archive_parser.Archive.GROUP_ID_LENGTH\n content += ' ' * archive_parser.Archive.FILE_MODE_LENGTH\n\n message = 'test file contents'\n message_size = str(len(message))\n content += message_size + ' ' * (archive_parser.Archive.CONTENT_SIZE_LENGTH -\n len(message_size))\n content += 
archive_parser.Archive.END_TAG\n content += message\n archive = archive_parser.Archive(content)\n archive.Parse()\n self.assertIn(file_name, archive.files)\n self.assertEquals(archive.files[file_name], message)", "def extract(self):\n self.build_path_pairs()\n self.extract_field_blocks()\n self.assert_filenames()", "def test_single_file_resource(self):\n year = random.randint(2001, 2020)\n name = \"eia923-%d.zip\" % year\n size = random.randint(500000, 800000)\n\n md5_hash = random.choice([\n \"4bd7e1025c91c00b50b6cef87cb9bfad\",\n \"883895453cb3144b97d0095472f6136e\",\n \"c271dfc0ca452b6582f0e592f57351ef\"])\n\n url = \"https://zenodo.org/api/deposit/depositions/%d/files/%s\" % (\n random.randint(10000, 99999), uuid.uuid4())\n\n fake_resource = {\n \"filename\": name,\n \"links\": {\"download\": url},\n \"filesize\": size,\n \"checksum\": md5_hash\n }\n\n package = eia923_raw.datapackager([fake_resource])\n res = package[\"resources\"][0]\n\n assert(res[\"name\"] == name)\n assert(res[\"title\"] == \"eia923-%d\" % year)\n assert(res[\"path\"] == url)\n assert(res[\"parts\"][\"year\"] == year)\n assert(res[\"remote_url\"] == url)\n\n assert(res[\"mediatype\"] == \"application/zip\")\n assert(res[\"format\"] == \"zip\")\n\n assert(res[\"bytes\"] == size)\n assert(res[\"hash\"] == md5_hash)", "def get_file_data(filename):", "def extract(src):\n CONN_RMT = False\n\n if isinstance(src, object):\n # Make local copy for processing\n local_file = \"%s/%s.%s\" % (os.getcwd(), src._id, src._file_types[0])\n\n if CONN_RMT:\n if not get_rmt_file(src._uri, src._src_creds, local_file):\n return False\n\n if src._file_types[0] == 'rss':\n # preprocess_file(src._file_types[0], local_path)\n return cnvt_xml2dict(local_file, '%s/test.json' % os.getcwd())", "def test_identify_contents_1(self):\n Path(self.base_dir, \"new_dir\").mkdir()\n Path(self.base_dir, \"file1.txt\").touch()\n Path(self.base_dir, \".DS_Store\").touch()\n list_of_items = basic.identify_contents(self.base_dir, kind=\"file\")\n exp_num_items = 2\n self.assertEqual(len(list_of_items), exp_num_items)", "def test_parse_source_xml(self):\n\n # Path to the source xml\n file = self.xmlfilepath + os.sep + \"sourcefile.xml\"\n\n # Path to non existent source file\n in_file = self.xmlfilepath + os.sep + \"sourcefile.pwg\"\n\n # Test for correct data\n # NOTE : For this test case to pass the source xml file should be\n # present in the download path\n self.assertEqual(\n parse_source_xml(file),\n (\n \"DLTINS_20210117_01of01.zip\",\n \"http://firds.esma.europa.eu/firds/DLTINS_20210117_01of01.zip\",\n ),\n )\n\n # Test for incorrect data\n self.assertEqual(parse_source_xml(in_file), None)", "def test_identify_contents_7(self):\n Path(self.base_dir, \"new_dir1\").mkdir()\n Path(self.base_dir, \"new_dir2\").mkdir()\n Path(self.base_dir, \"file1.txt\").touch()\n Path(self.base_dir, \".DS_Store\").touch()\n list_of_items = basic.identify_contents(self.base_dir, kind=\"invalid\")\n self.assertIsNone(list_of_items)", "def extract_zip_contents(zip_file, destination):\n logging.info(\"Extracting ZIP File\")\n if os.path.isfile(zip_file):\n with zipfile.ZipFile(zip_file, \"r\") as zip_ref:\n zip_ref.extractall(destination)\n else:\n logging.error(\"%s not found.\", zip_file)\n sys.exit(\"ZIP is not the filesystem.\")", "def keyholemarkup2x(file,output='df'):\n r = re.compile(r'(?<=\\.)km+[lz]?',re.I)\n try:\n extension = r.search(file).group(0) #(re.findall(r'(?<=\\.)[\\w]+',file))[-1]\n \n \n except IOError as e:\n logging.error(\"I/O error 
{0}\".format(e))\n if (extension.lower()=='kml') is True:\n buffer = file\n elif (extension.lower()=='kmz') is True:\n kmz = ZipFile(file, 'r')\n \n vmatch = np.vectorize(lambda x:bool(r.search(x)))\n A = np.array(kmz.namelist())\n sel = vmatch(A)\n buffer = kmz.open(A[sel][0],'r')\n \n else:\n raise ValueError('Incorrect file format entered. Please provide the '\n 'path to a valid KML or KMZ file.') \n \n \n parser = xml.sax.make_parser()\n handler = PlacemarkHandler()\n parser.setContentHandler(handler)\n parser.parse(buffer)\n \n try:\n kmz.close()\n except:\n pass\n \n df = pd.DataFrame(handler.mapping).T\n names = list(map(lambda x: x.lower(),df.columns))\n if 'description' in names:\n extradata = df.apply(PlacemarkHandler.htmlizer,axis=1)\n df = df.join(extradata)\n \n \n output = output.lower()\n \n if output=='df' or output=='dataframe' or output == None:\n result = df\n \n elif output=='csv':\n out_filename = file[:-3] + \"csv\"\n df.to_csv(out_filename,encoding='utf-8',sep=\"\\t\")\n result = (\"Successfully converted {0} to CSV and output to\"\n \" disk at {1}\".format(file,out_filename))\n \n elif output=='gpd' or output == 'gdf' or output=='geoframe' or output == 'geodataframe':\n try:\n import shapely\n from shapely.geometry import Polygon,LineString,Point\n except ImportError as e:\n raise ImportError('This operation requires shapely. {0}'.format(e))\n try:\n import fiona\n except ImportError as e:\n raise ImportError('This operation requires fiona. {0}'.format(e))\n try:\n import geopandas as gpd\n except ImportError as e:\n raise ImportError('This operation requires geopandas. {0}'.format(e))\n \n geos = gpd.GeoDataFrame(df.apply(PlacemarkHandler.spatializer,axis=1))\n result = gpd.GeoDataFrame(pd.concat([df,geos],axis=1))\n \n \n elif output=='geojson' or output=='json':\n try:\n import shapely\n from shapely.geometry import Polygon,LineString,Point\n except ImportError as e:\n raise ImportError('This operation requires shapely. {0}'.format(e))\n try:\n import fiona\n except ImportError as e:\n raise ImportError('This operation requires fiona. {0}'.format(e))\n try:\n import geopandas as gpd\n except ImportError as e:\n raise ImportError('This operation requires geopandas. {0}'.format(e))\n try:\n import geojson\n except ImportError as e:\n raise ImportError('This operation requires geojson. {0}'.format(e))\n \n geos = gpd.GeoDataFrame(df.apply(PlacemarkHandler.spatializer,axis=1))\n gdf = gpd.GeoDataFrame(pd.concat([df,geos],axis=1))\n out_filename = file[:-3] + \"geojson\"\n gdf.to_file(out_filename,driver='GeoJSON')\n validation = geojson.is_valid(geojson.load(open(out_filename)))['valid']\n if validation == 'yes':\n \n result = (\"Successfully converted {0} to GeoJSON and output to\"\n \" disk at {1}\".format(file,out_filename))\n else:\n raise ValueError('The geojson conversion did not create a '\n 'valid geojson object. Try to clean your '\n 'data or try another file.')\n \n elif output=='shapefile' or output=='shp' or output =='esri shapefile':\n try:\n import shapely\n from shapely.geometry import Polygon,LineString,Point\n except ImportError as e:\n raise ImportError('This operation requires shapely. {0}'.format(e))\n try:\n import fiona\n except ImportError as e:\n raise ImportError('This operation requires fiona. {0}'.format(e))\n \n try:\n import geopandas as gpd\n except ImportError as e:\n raise ImportError('This operation requires geopandas. 
{0}'.format(e))\n \n try:\n import shapefile\n except ImportError as e:\n raise ImportError('This operation requires pyshp. {0}'.format(e))\n \n \n geos = gpd.GeoDataFrame(df.apply(PlacemarkHandler.spatializer,axis=1))\n gdf = gpd.GeoDataFrame(pd.concat([df,geos],axis=1))\n out_filename = file[:-3] + \"shp\"\n gdf.to_file(out_filename,driver='ESRI Shapefile')\n sf = shapefile.Reader(out_filename)\n import shapefile\n sf = shapefile.Reader(out_filename)\n if len(sf.shapes())>0:\n validation = \"yes\"\n else:\n validation = \"no\"\n if validation == 'yes':\n \n result = (\"Successfully converted {0} to Shapefile and output to\"\n \" disk at {1}\".format(file,out_filename))\n else:\n raise ValueError('The Shapefile conversion did not create a '\n 'valid shapefile object. Try to clean your '\n 'data or try another file.') \n else:\n raise ValueError('The conversion returned no data; check if'\n ' you entered a correct output file type. '\n 'Valid output types are geojson, shapefile,'\n ' csv, geodataframe, and/or pandas dataframe.')\n \n return result", "def _test_listing_content(self, f):\n found = []\n with Archive(f) as a:\n for entry in a:\n found.append(entry.pathname)\n\n self.assertEqual(set(found), set(FILENAMES))", "def test_identify_contents_2(self):\n Path(self.base_dir, \"new_dir\").mkdir()\n Path(self.base_dir, \"file1.txt\").touch()\n Path(self.base_dir, \".DS_Store\").touch()\n ignore_set = set([\".DS_Store\"])\n list_of_items = basic.identify_contents(self.base_dir, kind=\"file\",\n ignore_set=ignore_set)\n exp_num_items = 1\n self.assertEqual(len(list_of_items), exp_num_items)" ]
[ "0.62932056", "0.62829584", "0.6257688", "0.6187469", "0.61508626", "0.5973567", "0.5958874", "0.5951237", "0.5783568", "0.56802505", "0.5677763", "0.56722355", "0.56672055", "0.56611294", "0.56451595", "0.5629609", "0.5598541", "0.5586034", "0.5574584", "0.55610496", "0.555113", "0.5543425", "0.550074", "0.54947394", "0.5478417", "0.5461112", "0.5455424", "0.5448644", "0.5444901", "0.54436904" ]
0.7376655
0
Activation function of hidden layers.
def forward_hidden_activation(self, X):
    return np.tanh(X)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def output_layer_activation(x):\n return x", "def compute_activation(self):\r\n\r\n x=0\r\n edges=self.in_edges\r\n for edge in edges:\r\n x+= edge.source.activation*edge.weight\r\n self.activation=1/(1+exp(-x))", "def __call__(self, inputs):\n return self._hidden_activation(inputs)", "def activation(z):\n # formula for sigmoid\n return 1 / (1 + np.exp(-z))", "def _hidden_activation(self, inputs):\n if self.act_enc is None:\n act_enc = lambda x: x\n else:\n act_enc = self.act_enc\n return act_enc(self._mappings(inputs))", "def neuron_activation(u, previous_state, Vin, Wres):\n input_activation = Vin.dot(u)\n assert input_activation.shape == Vin.shape, 'input activation wrong shape'\n recurrent_activation = previous_state.dot(Wres) # activation from neurons\n X = sigmoid_af(input_activation + recurrent_activation) # K x N\n return X", "def initialiseActivationFunctions(self):\n\n\t\t###uniform for output units\n\t\tif self._outputActivationFunctions == None or self._outputActivationDerivatives == None:\t\n\t\n\t\t\tself._outputActivationFunctions = []\n\t\t\tself._outputActivationDerivatives = []\n\n\t\t\tactFunc = lambda x : x\n\t\t\tdActFunc = lambda x : 1.0\n\t\n\t\t\tfor i in range(self.nOutputs):\n\t\t\t\t\n\t\t\t\tself._outputActivationFunctions.append(actFunc)\n\t\t\t\tself._outputActivationDerivatives.append(dActFunc)\n\n\t\t\tself._outputActivationFunctions = np.array(self._outputActivationFunctions)\n\t\t\tself._outputActivationDerivatives = np.array(self._outputActivationDerivatives)\n\t\t\t\n\n\t\tif self._hiddenActivationFunctions == None or self._hiddenActivationDerivatives == None:\n\n\t\t\tself._hiddenActivationFunctions = []\n\t\t\tself._hiddenActivationDerivatives = []\n\n\t\t\tfor i in range(self.nHiddenLayers):\n\n\t\t\t\tfTemp = []\n\t\t\t\tdTemp = []\n\t\t\t\t\n\t\t\t\t#Make the default sigmoid the one suggested in LeCun et al 1998\n\t\t\t\ttwist = 0.01\n\t\t\t\ta = 1.7159\n\t\t\t\tc = 2.0/3.0\n\n\t\t\t\tactFunc = lambda x : a*np.tanh(c*x) + twist*x\n\t\t\t\tdActFunc = lambda x : twist + a*c*(1.0 - (np.tanh(c*x)**2.0))\n\n#\t\t\t\tactFunc = lambda x : np.tanh(x)\n#\t\t\t\tdActFunc = lambda x : 1.0 - np.tanh(x)**2.0\n\n\t\t\t\t#plus all of the bias\n\t\t\t\tfor j in range(self.nUnitsPerLayer+1):\n\t\t\t\t\t\n\t\t\t\t\tfTemp.append(actFunc)\n\t\t\t\t\tdTemp.append(dActFunc)\n\t\t\t\t\n\t\t\t\tself._hiddenActivationFunctions.append(fTemp)\n\t\t\t\tself._hiddenActivationDerivatives.append(dTemp)\n\t\t\t\n\t\t\tself._hiddenActivationFunctions = np.array(self._hiddenActivationFunctions)\n\t\t\tself._hiddenActivationDerivatives = np.array(self._hiddenActivationDerivatives)", "def activation_function(X):\n\tz = np.sum(w*x+b)\n\treturn z", "def activation(x):\n # return np.tanh(x)\n return np.maximum(0,x)", "def activate(self, input_layer, funcname=None):\n if isinstance(funcname, tuple):\n funcname = funcname[0]\n params = funcname[1:]\n if funcname is None:\n funcname = self.activation_func\n if funcname == 'LINEAR':\n return input_layer\n activation_map = {\n 'RELU': tf.nn.relu,\n 'RELU6': tf.nn.relu6,\n 'ELU': tf.nn.elu,\n 'SIGMOID': tf.nn.sigmoid,\n 'TANH': tf.nn.tanh,\n 'LRELU': lambda x, name: tf.maximum(params[0]*x, x, name=name)\n }\n return activation_map[funcname](input_layer, name=funcname.lower())", "def activation_function(self, z):\n return 1. / (1. 
+ np.exp(-np.clip(z, -250, 250)))", "def encoder_activation_func(num_layer):\n ec_funct = []\n for i in range(num_layer):\n ec_funct.append('relu')\n ec_funct.append('softmax')\n\n return ec_funct", "def forward(W,X):\n return activation_func(np.dot(add_bias(X),W))", "def feedforward(self,inputs,hidden_activation=tanh,output_activation=tanh):\n\n # These two lists will contain the inputs and the outputs for each layer, respectively\n self.netIns = []\n self.netOuts = []\n\n input_samples=inputs.shape[0]\n\n #Currently, this will cause a crash when the network was created without bias nodes\n I = np.concatenate((inputs,np.ones((input_samples,1))),axis=1) # adds the bias input of 1\n self.netOuts.append(I) # keeping track of the outputs of every layer\n\n #The input is propagated through the layers\n for idx in range(self.size):\n W = self.weights[idx]\n\n I = np.dot(I,W) #performs the dot product between the input vector and the weight matrix\n self.netIns.append(I) # keeping track of the inputs to each layer\n\n #if we are on the last layer, we use the output activation function\n if idx == self.size -1:\n I = output_activation(I)\n #otherwise, we use the activation for the hidden layers\n else:\n I = hidden_activation(I)\n #I = np.concatenate((I,np.ones((I.shape[0],1))), axis=1)\n self.netOuts.append(I)\n\n #self.out = I\n return I", "def linear_activation_forward(A_prev, W, b, activation):\n pass", "def activation_func(activation:str):\n return nn.ModuleDict([\n ['relu', nn.ReLU(inplace=True)],\n ['leaky_relu', nn.LeakyReLU(negative_slope=0.01, inplace=True)],\n ['selu', nn.SELU(inplace=True)],\n ['none', nn.Identity()]\n ])[activation]", "def activation_func(activation, inplace=False):\n return nn.ModuleDict([\n ['relu', nn.ReLU(inplace=inplace)],\n ['leaky_relu', nn.LeakyReLU(negative_slope=0.2, inplace=inplace)],\n ['selu', nn.SELU(inplace=inplace)],\n ['none', nn.Identity()]\n ])[activation]", "def activationFunction(self, z):\n if self.__activation == 'sig':\n activation = 1 / (1 + np.exp(-z))\n else:\n tanhnum = np.exp(z) - np.exp(-z)\n tanhden = np.exp(z) + np.exp(-z)\n activation = tanhnum / tanhden\n return activation", "def feed_forward(self):\n self.hidden_activation = self._sigmoid(np.dot(self.input_activation, self.w1))\n self.output_activation = self._sigmoid(np.dot(self.hidden_activation, self.w2))", "def activation(x):\n return 1 / (1 + torch.exp(-x))", "def _forward(z: np.array, W: np.array, b: np.array,\n activation: str) -> np.array:\n a = np.dot(z, W) + b\n if activation == 'sigmoid':\n return sigmoid(a)\n elif activation == 'identity':\n return identity(a)", "def activation(self, z):\r\n denominator = 1 + np.exp(-z)\r\n result = 1/denominator\r\n return result", "def _activation(self,components,activation):\r\n \r\n if activation == \"ReLU\":\r\n components.append(nn.ReLU())\r\n elif activation == \"Sigmoid\":\r\n components.append(nn.Sigmoid())\r\n else:\r\n raise Exception(\"Invalid activation fn: \"+activation)", "def activate(self, inputs):\n # Calculate values of hidden nodes\n hidden_values = []\n for i in range(self.hidden_layer_size):\n hidden_node_value = 0\n bias_weight = self.bias_weights[i]\n hidden_node_value += bias_weight\n for j in range(self.input_values):\n weight = self.input_to_hidden_layer_weights[i][j]\n hidden_node_value += inputs[j] * weight\n\n # ReLU activation function\n hidden_node_value = max(hidden_node_value, 0)\n\n hidden_values.append(hidden_node_value)\n\n # Calculate output value\n output_value = 0\n for i in 
range(self.hidden_layer_size):\n output_value += hidden_values[i] * \\\n self.hidden_to_output_layer_weights[i]\n\n return output_value", "def sigmoid(X,W,b):\n preActivation = np.dot(X, W) + b\n return (1.0)/(1.0 + np.exp(-preActivation))", "def activation_function(self, X):\n return self.net_input(X)", "def activation_function(self, X):\n return self.net_input(X)", "def sigmoid_activation_function(z):\n val = 1 / (1 + np.exp(-z))\n return val", "def pre_activation(features, weights, bias):\n # this is a dot product between features and weights, added to bias after.\n return np.dot(features, weights) + bias", "def activation_func(x):\r\n a = -1\r\n return 1/(1+np.exp(-a*x))" ]
[ "0.7260774", "0.7122866", "0.7001082", "0.6929951", "0.69202", "0.68869704", "0.68860483", "0.6844513", "0.66774213", "0.66773957", "0.6666057", "0.6664862", "0.66458833", "0.6635408", "0.6609628", "0.6577476", "0.6565831", "0.6560835", "0.65351784", "0.6441285", "0.6438028", "0.6415474", "0.64107394", "0.6403249", "0.6398191", "0.6396151", "0.6396151", "0.637774", "0.6370806", "0.6358973" ]
0.71660954
1
Derivative of the activation function of hidden layers.
def backward_hidden_activation(self, Y, d):
    # y = tanh(x) ==> dy/dx = (1 - tanh(x)^2) = (1 - y^2)
    return d * (1 - Y ** 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def derivative_activation(z):\n return activation(z) * (1 - activation(z))", "def gradient_hidden(self, h):\n if self.relu:\n return 1.0*(h > 0)\n else:\n return 1 - h * h", "def sigmoid_gradient(z):\n #derivative of sigmoid\n return z * (1 - z)", "def cost_derivative(output_activations, y):\n return (output_activations - y)", "def cost_derivative(self, output_activations, y):\r\n return (output_activations-y)", "def derivative(a, y, z):\n return (a - y) * Sigmoid.derivative(z)", "def cost_derivative(self, output_activations, y):\n return (output_activations-y)", "def cost_derivative(self, output_activations, y):\n return (output_activations-y)", "def cost_derivative(self, output_activations, y):\n return (output_activations-y)", "def cost_derivative(self, output_activations, y):\n return (output_activations-y)", "def backwards(delta,params,name='',activation_deriv=sigmoid_deriv):\n # everything you may need for this layer\n W = params['W' + name]\n b = params['b' + name]\n X, pre_act, post_act = params['cache_' + name]\n # your code here\n # do the derivative through activation first\n # then compute the derivative W,b, and X\n \n delta_pre = activation_deriv(post_act) * delta\n # (in_dim, out_dim) = (in_dim, examples) @ (examples, out_dim)\n grad_W = X.transpose() @ delta_pre\n grad_b = np.sum(delta_pre, axis=0, keepdims=True) # (1, out_dim)\n # (examples, in_dim) = (examples, out_dim) @ (out_dim, in_dim)\n grad_X = delta_pre @ W.transpose()\n\n # store the gradients\n params['grad_W' + name] = grad_W\n params['grad_b' + name] = grad_b\n return grad_X", "def grad_sigmoid(self):\n grad = self.sigmoid(self.x) * (1 - self.sigmoid(self.x))\n return grad", "def cost_derivative(self, output_activations, y):\n\t\treturn (output_activations - y)", "def cost_derivative(self, output_activations, y):\n return 2 * (output_activations - y)", "def test_activation_gradient():\n np.random.seed(7477)\n cnn = CNNTanh([1, 1])\n X = np.random.randn(10, 1)\n Y = cnn.forward_hidden_activation(X)\n eps = 1e-7\n Y1 = cnn.forward_hidden_activation(X + eps)\n D = cnn.backward_hidden_activation(Y, np.ones_like(Y))\n D1 = (Y1 - Y) / eps\n error = np.abs(D1 - D).max()\n assert np.isclose(error, 0, atol=1e-5)", "def sigmoid_derivative(x):\n return x * (1-x)", "def derivative_sigmoid(x):\n return x * (1 - x)", "def derivative_sigmoid(x):\n return x * (1 - x)", "def sigmoid_derivative(x):\n return x * (1.0 - x)", "def backward_deconvnet_relu(x):\n def grad(dy):\n return tf.nn.relu(dy)\n return tf.nn.relu(x), grad", "def compute_hidden_delta(self):\r\n out = self.activation\r\n outedges = self.out_edges\r\n wsum = 0.0\r\n for edge in outedges:\r\n wsum += edge.old_weight*(edge.dest.delta)\r\n self.delta = out*(1-out)*wsum", "def backpropagation(self):\n\n print \"backpropagation in Convlayer\"\n\n if self.__nextLayer.__class__.__name__ is 'FCLayer':\n WF = self.__nextLayer.numberOfNeuronsInLayer\n dNext = np.reshape(self.__nextLayer.getDeltas(), (1, 1, 1, WF))\n else:\n dNext = self.__nextLayer.getDeltas()\n\n self.deltas = np.zeros(self.outputValues.shape)\n\n # Compute Deltas\n if self.__nextLayer.__class__.__name__ is 'FCLayer':\n for n in range(self.outputValues.shape[0]):\n for nf in range(self.numberOfFilters):\n for h in range(self.outputValues.shape[2]):\n for w in range(self.outputValues.shape[3]):\n deltas_i = self.activationFunctionDerivative(self.outputValues)[n, nf, h, w] * dNext[\n :, :, :, nf]\n self.deltas[n, nf, h, w] += deltas_i\n\n elif self.__previousLayer is None:\n for n in 
range(self.outputValues.shape[0]):\n deltas_i = self.activationFunctionDerivative(self.outputValues)[n] * dNext\n self.deltas[n] += deltas_i[0]\n\n else:\n for n in range(self.outputValues.shape[0]):\n deltas_i = self.activationFunctionDerivative(self.outputValues)[n] * dNext\n self.deltas[n] += deltas_i[n]\n\n # print \"shape of delta is \" + str(self.deltas.shape)\n\n if self.spaceConv is True:\n self.deltas = np.transpose(self.deltas, (3, 1, 2, 0))\n else:\n pass\n\n # Compute delta Biases\n deltaBiases = (np.sum(self.deltas, axis=(0, 2, 3)))\n assert deltaBiases.shape == self.bias.shape\n\n # Compute delta Kernels\n\n deltaKernel = np.zeros(self.weights.shape)\n\n for ninp in range(self.inputShape[0]):\n for nf in range(self.numberOfFilters):\n flippedDelta = self.flipArray(self.deltas[ninp, nf, :, :]) # Flips Kernel for the convolution\n for cin in range(self.inputShape[1]):\n nh = 0\n for h in np.arange(0, self.inputs.shape[2] - flippedDelta.shape[0] + 1, self.stride[0]):\n nw = 0\n for w in np.arange(0, self.inputs.shape[3] - flippedDelta.shape[1] + 1, self.stride[1]):\n activationMap = self.inputs[ninp, cin,\n h:h + flippedDelta.shape[0],\n w:w + flippedDelta.shape[1]] # Input Map used for the convolution\n deltaKernel[nf, nh, nw] = np.sum(activationMap * flippedDelta) # Convolution\n nw += 1\n nh += 1\n\n if self.spaceConv is True:\n self.deltas = np.transpose(self.deltas, (3, 1, 2, 0))\n else:\n pass\n\n self.deltaWeights = deltaKernel\n self.deltaBiases = deltaBiases\n\n if self.__previousLayer is None:\n return self.deltas, self.deltaWeights, self.deltaBiases\n else:\n return self.__previousLayer.backpropagation()", "def d_act_f(self, p):\n if self.activation is None:\n return p\n if self.activation == \"tanh\":\n return 1 - np.tanh(p)**2\n if self.activation == \"sigmoid\":\n return (1 / (1 + np.exp(-p))) * (1 - (1 / (1 + np.exp(-p))))\n if self.activation == \"relu\":\n p[np.where(p < 0)] = 0\n return p\n\n return p", "def backward(self, delta):\n if self.activation_type == \"sigmoid\":\n grad = self.grad_sigmoid()\n\n elif self.activation_type == \"tanh\":\n grad = self.grad_tanh()\n\n elif self.activation_type == \"ReLU\":\n grad = self.grad_ReLU()\n\n return grad * delta", "def activ_fn_derivative(z):\n return 1 - np.square(np.tanh(z))", "def sigmoid_derivative(x):\n\n return sigmoid(x) * (1 - sigmoid(x))", "def derivatives(self, x=[], function='sigmoid', alpha=0.01, y_pred = [], y = []):\n if function == \"sigmoid\":\n dadz = self.activation(x,\"sigmoid\")*(1-self.activation(x,\"sigmoid\"))\n return dadz\n\n if function == \"swish\":\n dadz = self.activation(x,\"sigmoid\") + x * self.activation(x,\"sigmoid\") * (1-self.activation(x,\"sigmoid\"))\n return dadz\n \n if function == \"linear\":\n dadz = np.ones(np.shape(x))\n return dadz\n\n if function == \"relu\":\n dadz = np.greater(x, 0).astype(int)\n return dadz\n\n if function == \"leakyrelu\":\n dadz = 1 * (x > 0) + alpha * (x<0)\n return dadz\n \n if function == \"mse\":\n assert(np.shape(y_pred)) == np.shape(y)\n if y.ndim > 1:\n m = np.shape(y)[0] #number of samples\n n = np.shape(y)[1] #number of output elements\n dCdy_pred = np.sum((y_pred - y), axis=0)*(1/(m*n))*2\n\n else:\n m = 1\n n = len(y) \n dCdy_pred = (y_pred - y)*(1/(m*n))*2\n return dCdy_pred", "def sigmoid_backward(dout, cache):\n dx, x = None, cache\n\n f = lambda x: 1/(1 + np.exp(-x)) # activation function (sigmoid)\n\n fun = f(x)\n\n dx = np.multiply(fun, (1-fun))\n dx = np.multiply(dx,dout)\n\n return dx", "def grad_sigmoid(self):\r\n return 
self.sigmoid(self.x) * (1 - self.sigmoid(self.x))", "def sigmoid_grad(z):\n return Sigmoid(z) * (1 - Sigmoid(z))" ]
[ "0.7980077", "0.6814162", "0.6745424", "0.67276245", "0.6709438", "0.6657668", "0.66385204", "0.66385204", "0.66385204", "0.66385204", "0.66360676", "0.6593483", "0.6587966", "0.65865093", "0.65624034", "0.6561811", "0.6504623", "0.6504623", "0.6497736", "0.6481813", "0.6478363", "0.64751655", "0.6467388", "0.64646304", "0.64528006", "0.64497083", "0.64359385", "0.64353967", "0.6432399", "0.6411271" ]
0.72875506
1
Test the gradient of the activation function.
def test_activation_gradient():
    np.random.seed(7477)
    cnn = CNNTanh([1, 1])
    X = np.random.randn(10, 1)
    Y = cnn.forward_hidden_activation(X)
    eps = 1e-7
    Y1 = cnn.forward_hidden_activation(X + eps)
    D = cnn.backward_hidden_activation(Y, np.ones_like(Y))
    D1 = (Y1 - Y) / eps
    error = np.abs(D1 - D).max()
    assert np.isclose(error, 0, atol=1e-5)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_gradient_convergence(self):\n pass", "def check_layer_gradient(layer, x, delta=1e-5, tol=1e-4):\n output = layer.forward(x)\n np.random.seed(10)\n #output_weight = np.random.randn(*output.shape)\n output_weight = np.ones_like(output)\n #print('output_weight',output_weight)\n\n def helper_func(x):\n output = layer.forward(x)\n loss = np.sum(output * output_weight)\n #print('loss',loss)\n d_out = np.ones_like(output) * output_weight\n grad = layer.backward(d_out)\n return loss, grad\n\n return check_gradient(helper_func, x, delta, tol)", "def run_check_grad(hyperparameters):\n # This creates small random data with 20 examples and\n # 10 dimensions and checks the gradient on that data.\n num_examples = 20\n num_dimensions = 10\n\n weights = np.random.randn(num_dimensions + 1, 1)\n data = np.random.randn(num_examples, num_dimensions)\n targets = np.random.rand(num_examples, 1)\n\n diff = check_grad(logistic,\n weights,\n 0.001,\n data,\n targets,\n hyperparameters)\n\n print(\"diff =\", diff)", "def test_linear_activation(self):\n self.assertEqual([0.5, 0.6], af.Linear().output([0.5, 0.6]))\n self.assertEqual([1, 1], list(\n af.Linear().derivative(np.array([0.5, 0.6]))))", "def test_gradient(gradient, thetas, activations_neural, classification_matrix, lambda_value=1, step=1E-4, tolerance=1E-4):\n \n dimensional_error(thetas[-1].shape, gradient[-1].shape)\n\n last_thetas = thetas[-1]\n \n last_thetas_plus_step = thetas[-1] + step\n last_thetas_minus_step = thetas[-1] - step\n\n num_grad_total = pd.DataFrame()\n\n for i in range( gradient[-1].shape[0] ):\n\n\n last_thetas_plus = pd.concat( [last_thetas[0:i], last_thetas_plus_step[i:i+1] , last_thetas[i+1:]] , axis=0 )\n\n last_thetas_minus = pd.concat( [last_thetas[0:i], last_thetas_minus_step[i:i+1], last_thetas[i+1:]] , axis=0 )\n\n last_activation_plus = activation_values(activations_neural[-2], last_thetas_plus ).to_numpy()\n last_activation_minus = activation_values(activations_neural[-2], last_thetas_minus).to_numpy()\n\n cost_plus = cost_function_sigmoid([last_activation_plus] , classification_matrix, [last_thetas_plus] , lambda_value)\n cost_minus = cost_function_sigmoid([last_activation_minus], classification_matrix, [last_thetas_minus], lambda_value)\n\n num_grad = (cost_plus - cost_minus)/(2*step) # it's a column DataFrame\n num_grad_total = pd.concat([num_grad_total, num_grad], axis=1)\n\n num_grad_total = num_grad_total.T\n\n dimensional_error(num_grad_total.shape, gradient[-1].shape)\n\n num_grad_total.index = gradient[-1].index\n num_grad_total.columns = gradient[-1].columns\n\n _ = ( np.abs( gradient[-1].to_numpy() - num_grad_total.to_numpy() ) <= tolerance )\n\n return _, num_grad_total", "def check_gradient(self, x, y):\n x = x.transpose()\n y = y.transpose()\n layers_copy = deepcopy(self.layers)\n epsilon = 10 ** -4\n a, layer = self.forward_propagation(x)\n delta = self.calculate_delta(a, y, layer)\n self.backpropagation(delta=delta, theta=layer.theta)\n previous_layer_output = x\n for layer in self.layers:\n theta_copy = deepcopy(layer.theta)\n real_theta_size = theta_copy.shape\n delta = layer.delta\n dc_dtheta = np.outer(previous_layer_output, delta).transpose()\n previous_layer_output = layer.a\n R, C = theta_copy.shape\n for i in range(R):\n for j in range(C):\n theta_plus = deepcopy(theta_copy)\n theta_plus[i, j] += epsilon\n layer.theta = theta_plus\n a_plus, l_plus = self.forward_propagation(x)\n err_plus = self.calculate_loss(a_plus, y)\n theta_minus = deepcopy(theta_copy)\n theta_minus[i, j] -= epsilon\n 
layer.theta = theta_minus\n a_minus, l_minus = self.forward_propagation(x)\n err_minus = self.calculate_loss(a_minus, y)\n limit = (err_plus - err_minus)/(2*epsilon)\n grad_diff = abs(dc_dtheta[i,j] - limit)\n assert grad_diff < 10 ** -6, f\"Diff {grad_diff} is too big.\"\n layer.theta = theta_copy", "def test_relu_activation(self):\n self.assertEqual([0, 0.5], list(\n af.Relu().output(np.array([-0.5, 0.5]))))\n self.assertEqual([0, 1], list(\n af.Relu().derivative(np.array([-0.5, 0.5]))))", "def testBackpropGradient(x, y, n_hidden_units):\n assert x.shape[1] == 784 and y.shape[1] == 10\n print('testBackpropGradient...')\n W1, b1, W2, b2 = initializeWeights(n_hidden_units, n_inputs=784, n_outputs=10)\n w = flattenW(W1, b1, W2, b2)\n point_to_check = w\n gradient_check = scipy.optimize.check_grad(JWrapper, gradJWrapper, point_to_check, \n x, y, n_hidden_units)\n print('check_grad() value: {}'.format(gradient_check))\n print('Gradient is good!' if gradient_check < 1e-4 else 'WARNING: bad gradient!')", "def run_check_grad(hyperparameters):\n\n # This creates small random data with 7 examples and\n # 9 dimensions and checks the gradient on that data.\n num_examples = 7\n num_dimensions = 9\n\n weights = np.random.randn(num_dimensions+1, 1)\n data = np.random.randn(num_examples, num_dimensions)\n targets = (np.random.rand(num_examples, 1) > 0.5).astype(int)\n\n diff = check_grad(logistic, # function to check\n weights,\n 0.001, # perturbation\n data,\n targets,\n hyperparameters)\n\n print \"diff =\", diff", "def testNestedFunctionGradientCall(self):\n check_numerics_callback.enable_check_numerics()\n\n x = constant_op.constant(1.0 - 1e-8, dtype=dtypes.float32)\n\n @def_function.function\n def asinp1(x):\n # asin()'s gradient overflows at the value close to 1.0.\n return math_ops.asin(x) + 1.0\n\n @def_function.function\n def loss(x):\n return math_ops.square(asinp1(x))\n\n with backprop.GradientTape() as tape:\n tape.watch(x)\n y = loss(x)\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n lambda: self.evaluate(tape.gradient(y, x)))\n self.assertTrue(re.search(r\"gradient\", message))", "def check_gradient(f, g, x):\n x = np.asarray(x)\n return np.max(g(x) - gradient(f, x))", "def gradient(self, x):\n pass", "def gradient_approximation_test(model: BinaryModel, X: np.ndarray, Y: np.ndarray):\n w_orig = model.w.copy()\n epsilon = 1e-2\n image1 = X[0][:-1].reshape(28,28)\n plt.imshow(image1)\n plt.show()\n for i in range(w_orig.shape[0]):\n orig = model.w[i].copy()\n model.w[i] = orig + epsilon\n logits = model.forward(X)\n cost1 = cross_entropy_loss(Y, logits)\n model.w[i] = orig - epsilon\n logits = model.forward(X)\n cost2 = cross_entropy_loss(Y, logits)\n gradient_approximation = (cost1 - cost2) / (2 * epsilon)\n model.w[i] = orig\n # Actual gradient\n logits = model.forward(X)\n model.backward(X, logits, Y)\n #print_mnist_img(model.grad, i)\n if (i == 0):\n image2 = model.grad[:-1].reshape(28,28)\n plt.imshow(image2)\n plt.show()\n difference = gradient_approximation - model.grad[i, 0]\n \n #assert abs(difference) <= epsilon**2,\\\n if (abs(difference) <= epsilon**2):\n print(f\"Calculated gradient is incorrect. \" \\\n f\"Approximation: {gradient_approximation}, actual gradient at iteration {i}: {model.grad[i, 0]}\\n\" \\\n f\"If this test fails there could be errors in your cross entropy loss function, \" \\\n f\"forward function or backward function\")\n else:\n print(f\"Gradient est. 
{gradient_approximation}, actual gradient {model.grad[i, 0]}\")", "def check_layer_gradient(layer, x, delta=1e-5, tol=1e-4):\n output = layer.forward(x)\n if isinstance(output, list):\n output = output[0]\n output_weight = CP.cp.random.randn(*output.shape)\n\n def helper_func(x):\n output = layer.forward(x)\n if isinstance(output, list):\n output = output[0]\n loss = CP.cp.sum(output * output_weight)\n d_out = CP.cp.ones_like(output) * output_weight\n grad = layer.backward(d_out)\n return loss, grad\n\n return check_gradient(helper_func, x, delta, tol)", "def grad_checker(X, y, theta, epsilon=0.01, tolerance=1e-4):\n true_gradient = compute_square_loss_gradient(X, y, theta) #the true gradient\n num_features = theta.shape[0]\n \n e = np.eye(num_features)\n denominator = np.float(2*epsilon)\n numerator = np.array([ compute_square_loss(X_train,y_train,theta+epsilon*e[i]) - compute_square_loss(X_train,y_train,theta-epsilon*e[i]) for i in range(num_features) ] )\n diff = (true_gradient - numerator/denominator)\n \n return (diff.dot(diff) < tolerance)", "def test_softplus_activation(self):\n self.assertEqual(\n [0.4740769841801067, 0.9740769841801067], list(af.SoftPlus().output(np.array([-0.5, 0.5]))))\n self.assertEqual([0.3775406687981454, 0.6224593312018546], list(\n af.SoftPlus().derivative(np.array([-0.5, 0.5]))))", "def testLambertWGradient(self, value, expected):\n x = tf.constant(value, dtype=tf.float64)\n with tf.GradientTape() as g:\n g.watch(x)\n y = tfp.math.lambertw(x)\n\n dy_dx = g.gradient(y, x)\n self.assertAllClose(dy_dx, expected)", "def generic_gradient_checker(X, y, theta, objective_func, gradient_func, epsilon=0.01, tolerance=1e-4):\n #TODO\n true_gradient = gradient_func(X, y, theta) #The true gradient\n num_features = theta.shape[0]\n approx_grad = np.zeros(num_features) #Initialize the gradient we approximate\n #TODO\n e_i = np.zeros(num_features)\n for k in range(num_features):\n e_i[k] = 1\n approx_grad[k] = (objective_func(X, y, theta+epsilon*e_i)-objective_func(X, y, theta-epsilon*e_i))/(2*epsilon) \n e_i[k] = 0\n\n return np.sqrt(sum((true_gradient-approx_grad)**2)) < tolerance", "def check_gradient(f, x, delta=1e-5, tol=1e-4):\n\n assert isinstance(x, np.ndarray)\n assert x.dtype == np.float\n \n orig_x = x.copy()\n #print('check_g, orig_x befor',orig_x)\n #print('check_g, x befor',x)\n #print('befor first pass in grad check')\n fx, analytic_grad = f(x)\n #print('after first pass in grad check')\n #print('check_g, orig_x after',orig_x)\n #print('check_g, x.shape',x.shape)\n #print('func',f(x)[0])\n #print('fx=',fx,'analityc_grad=',analytic_grad)\n \n assert np.all(np.isclose(orig_x, x, tol)), \"Functions shouldn't modify input variables\"\n\n assert analytic_grad.shape == x.shape\n #print('analitical grad.shape',analytic_grad.shape)\n analytic_grad = analytic_grad.copy()\n\n # We will go through every dimension of x and compute numeric\n # derivative for it\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n #print('it.shape=',it.shape)\n while not it.finished:\n ix = it.multi_index\n #print('ix',ix)\n #print('x[ix]',x[ix])\n analytic_grad_at_ix = analytic_grad[ix]\n #print('analitical_grad-at_ix',analytic_grad_at_ix)\n orig_x = x.copy()\n #print('orig_x',orig_x)\n #print('x.shape befor delta',x.shape)\n orig_x[ix]+=delta\n #print('x.shape after delta',x.shape)\n #print('orig_x[ix] delta +',orig_x[ix])\n fx_plus=f(orig_x)[0]\n #fx_plus=fx_plus_full[ix[0]]\n #print('fx__plus',fx_plus)\n orig_x = x.copy()\n orig_x[ix]-=delta\n #print('orig_x[ix] delta 
-',orig_x[ix])\n fx_minus=f(orig_x)[0]\n #print('fx_minus',fx_minus)\n \n divider=2*delta\n #print('divider',divider)\n #numeric_grad_at_ix = np.divide((fx_plus-fx_minus),divider)\n numeric_grad_at_ix = (fx_plus-fx_minus)/divider\n #print('numeric_grad_at_ix',numeric_grad_at_ix)\n #print('fx(ix)', fx[ix])\n\n # TODO compute value of numeric gradient of f to idx\n \n if not np.isclose(numeric_grad_at_ix, analytic_grad_at_ix, tol):\n print(\"Gradients are different at %s. Analytic: %2.5f, Numeric: %2.5f\" % (ix, analytic_grad_at_ix, numeric_grad_at_ix))\n return False\n\n it.iternext()\n\n print(\"Gradient check passed!\")\n return True", "def test_param_to_gradient(self):\n pass", "def _evaluate_gradient(self, **variables):\n pass", "def _test_gradient_against_estimate(self, dtype, random, use_gpu):\n z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)\n q = np.zeros((test_obs, 10)).astype(dtype)\n q[np.arange(0, test_obs), np.random.randint(0, 10, size=test_obs)] = 1\n\n logits = array_ops.placeholder(dtype, name='z')\n sparsemax_op = sparsemax(logits)\n loss_op = sparsemax_loss(logits, sparsemax_op, q)\n\n with self.test_session(use_gpu=use_gpu):\n err = gradient_checker.compute_gradient_error(\n logits, z.shape, loss_op, (test_obs,), x_init_value=z, delta=1e-9)\n\n self.assertLess(err, 1e-4)", "def test_gradients_update(self):\n # Reset models.\n self.model.load_state_dict(self.initial_model_dict)\n self.actor_model.load_state_dict(self.initial_actor_model_dict)\n\n # There should be no calculated gradient yet.\n for p in self.model.parameters():\n self.assertIsNone(p.grad)\n for p in self.actor_model.parameters():\n self.assertIsNone(p.grad)\n\n polybeast.learn(*self.learn_args)\n\n # Check that every parameter for the learner model has a gradient, and that\n # there is at least some non-zero gradient for each set of paramaters.\n for p in self.model.parameters():\n self.assertIsNotNone(p.grad)\n self.assertFalse(torch.equal(p.grad, torch.zeros_like(p.grad)))\n\n # Check that the actor model has no gradients associated with it.\n for p in self.actor_model.parameters():\n self.assertIsNone(p.grad)", "def gradientFunction(theta, X, y):\n y = y[:, 0]\n m = y.shape # number of training samples\n grad = X.T.dot(sigmoid(theta.dot(X.T))-1*y)\n grad /= m\n return grad", "def gradient(cls, x):\n y = Sigmoid.apply(x)\n return np.multiply(y, 1 - y)", "def test_grad_test_values(self):\r\n backup = theano.config.compute_test_value\r\n theano.config.compute_test_value = 'raise'\r\n try:\r\n x = tensor.scalar('x')\r\n x.tag.test_value = 1\r\n # Used to crash due to undefined test value.\r\n tensor.grad(ifelse(0, x, x), x)\r\n finally:\r\n theano.config.compute_test_value = backup", "def test_parameter_gradients(net, X, Y, name, p, grad_p, loss, index):\n eps = 1e-7\n backup = p[index]\n p[index] += eps\n A1 = net.forward(X)\n loss1 = net.loss(Y, A1[-1])\n ratio = (loss1 - loss) / eps\n assert np.isclose(grad_p[index], ratio)\n p[index] = backup", "def grad_checker(X, y, theta, epsilon=0.01, tolerance=1e-4):\n true_gradient = compute_square_loss_gradient(X, y, theta) #The true gradient\n num_features = theta.shape[0]\n approx_grad = np.zeros(num_features) #Initialize the gradient we approximate\n #TODO\n e_i = np.zeros(num_features)\n for k in range(num_features):\n e_i[k] = 1\n approx_grad[k] = (compute_square_loss(X, y, theta+epsilon*e_i)-compute_square_loss(X, y, theta-epsilon*e_i))/(2*epsilon) \n e_i[k] = 0\n\n return np.sqrt(sum((true_gradient-approx_grad)**2)) < tolerance", "def 
test_leaky_relu_activation(self):\n self.assertEqual(\n [-0.0050, 0.5000], list(af.LeakyRelu(0.01).output(np.array([-0.5, 0.5]))))\n self.assertEqual([0.01, 1], list(af.LeakyRelu(\n 0.01).derivative(np.array([-0.5, 0.5]))))", "def sigmoid_gradient(z):\n #derivative of sigmoid\n return z * (1 - z)" ]
[ "0.71142787", "0.70332646", "0.6905601", "0.6849363", "0.6837006", "0.68183017", "0.6813721", "0.676131", "0.6752075", "0.6721996", "0.66837656", "0.66234875", "0.6619323", "0.6608141", "0.6590701", "0.6554544", "0.65324926", "0.6527291", "0.648666", "0.6469905", "0.6446868", "0.6429391", "0.6424775", "0.6421657", "0.641466", "0.6404057", "0.6391476", "0.6390645", "0.6372519", "0.636352" ]
0.78597736
0
Test the gradient of the loss wrt the parameters.
def test_parameter_gradients(net, X, Y, name, p, grad_p, loss, index):
    eps = 1e-7
    backup = p[index]
    p[index] += eps
    A1 = net.forward(X)
    loss1 = net.loss(Y, A1[-1])
    ratio = (loss1 - loss) / eps
    assert np.isclose(grad_p[index], ratio)
    p[index] = backup
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_layer_gradient(layer, x, delta=1e-5, tol=1e-4):\n output = layer.forward(x)\n np.random.seed(10)\n #output_weight = np.random.randn(*output.shape)\n output_weight = np.ones_like(output)\n #print('output_weight',output_weight)\n\n def helper_func(x):\n output = layer.forward(x)\n loss = np.sum(output * output_weight)\n #print('loss',loss)\n d_out = np.ones_like(output) * output_weight\n grad = layer.backward(d_out)\n return loss, grad\n\n return check_gradient(helper_func, x, delta, tol)", "def run_check_grad(hyperparameters):\n # This creates small random data with 20 examples and\n # 10 dimensions and checks the gradient on that data.\n num_examples = 20\n num_dimensions = 10\n\n weights = np.random.randn(num_dimensions + 1, 1)\n data = np.random.randn(num_examples, num_dimensions)\n targets = np.random.rand(num_examples, 1)\n\n diff = check_grad(logistic,\n weights,\n 0.001,\n data,\n targets,\n hyperparameters)\n\n print(\"diff =\", diff)", "def test_gradient_convergence(self):\n pass", "def check_gradient(self, x, y):\n x = x.transpose()\n y = y.transpose()\n layers_copy = deepcopy(self.layers)\n epsilon = 10 ** -4\n a, layer = self.forward_propagation(x)\n delta = self.calculate_delta(a, y, layer)\n self.backpropagation(delta=delta, theta=layer.theta)\n previous_layer_output = x\n for layer in self.layers:\n theta_copy = deepcopy(layer.theta)\n real_theta_size = theta_copy.shape\n delta = layer.delta\n dc_dtheta = np.outer(previous_layer_output, delta).transpose()\n previous_layer_output = layer.a\n R, C = theta_copy.shape\n for i in range(R):\n for j in range(C):\n theta_plus = deepcopy(theta_copy)\n theta_plus[i, j] += epsilon\n layer.theta = theta_plus\n a_plus, l_plus = self.forward_propagation(x)\n err_plus = self.calculate_loss(a_plus, y)\n theta_minus = deepcopy(theta_copy)\n theta_minus[i, j] -= epsilon\n layer.theta = theta_minus\n a_minus, l_minus = self.forward_propagation(x)\n err_minus = self.calculate_loss(a_minus, y)\n limit = (err_plus - err_minus)/(2*epsilon)\n grad_diff = abs(dc_dtheta[i,j] - limit)\n assert grad_diff < 10 ** -6, f\"Diff {grad_diff} is too big.\"\n layer.theta = theta_copy", "def testBackpropGradient(x, y, n_hidden_units):\n assert x.shape[1] == 784 and y.shape[1] == 10\n print('testBackpropGradient...')\n W1, b1, W2, b2 = initializeWeights(n_hidden_units, n_inputs=784, n_outputs=10)\n w = flattenW(W1, b1, W2, b2)\n point_to_check = w\n gradient_check = scipy.optimize.check_grad(JWrapper, gradJWrapper, point_to_check, \n x, y, n_hidden_units)\n print('check_grad() value: {}'.format(gradient_check))\n print('Gradient is good!' 
if gradient_check < 1e-4 else 'WARNING: bad gradient!')", "def run_check_grad(hyperparameters):\n\n # This creates small random data with 7 examples and\n # 9 dimensions and checks the gradient on that data.\n num_examples = 7\n num_dimensions = 9\n\n weights = np.random.randn(num_dimensions+1, 1)\n data = np.random.randn(num_examples, num_dimensions)\n targets = (np.random.rand(num_examples, 1) > 0.5).astype(int)\n\n diff = check_grad(logistic, # function to check\n weights,\n 0.001, # perturbation\n data,\n targets,\n hyperparameters)\n\n print \"diff =\", diff", "def test_update_parameters(model):\n train_inputs = torch.tensor([[1., 2., 3.]])\n train_loss = 0.5 * (model(train_inputs) ** 2)\n\n params = gradient_update_parameters(model,\n train_loss,\n params=None,\n step_size=0.5,\n first_order=False)\n\n assert train_loss.item() == 264.5\n assert list(params.keys()) == ['weight']\n assert torch.all(params['weight'].data == torch.tensor([[-9.5, -20., -29.5]]))\n\n \"\"\"\n The new loss function (still with respect to the weights of the model w) is\n defined as:\n g(w) = 0.5 * (4 * w'_1 + 5 * w'_2 + 6 * w'_3) ** 2\n = 0.5 * (4 * (w_1 - 0.5 * df / dw_1)\n + 5 * (w_2 - 0.5 * df / dw_2)\n + 6 * (w_3 - 0.5 * df / dw_3)) ** 2\n = 0.5 * (4 * (w_1 - 0.5 * 1 * (1 * w_1 + 2 * w_2 + 3 * w_3))\n + 5 * (w_2 - 0.5 * 2 * (1 * w_1 + 2 * w_2 + 3 * w_3))\n + 6 * (w_3 - 0.5 * 3 * (1 * w_1 + 2 * w_2 + 3 * w_3))) ** 2\n = 0.5 * ((4 - 4 * 0.5 - 5 * 1.0 - 6 * 1.5) * w_1\n + (5 - 4 * 1.0 - 5 * 2.0 - 6 * 3.0) * w_2\n + (6 - 4 * 1.5 - 5 * 3.0 - 6 * 4.5) * w_3) ** 2\n = 0.5 * (-12 * w_1 - 27 * w_2 - 42 * w_3) ** 2\n\n Therefore the gradient of the function g with respect to w (and evaluated\n at w = [2, 3, 5]) is:\n dg / dw_1 = -12 * (-12 * w_1 - 27 * w_2 - 42 * w_3) = 3780\n dg / dw_2 = -27 * (-12 * w_1 - 27 * w_2 - 42 * w_3) = 8505\n dg / dw_3 = -42 * (-12 * w_1 - 27 * w_2 - 42 * w_3) = 13230\n \"\"\"\n test_inputs = torch.tensor([[4., 5., 6.]])\n test_loss = 0.5 * (model(test_inputs, params=params) ** 2)\n\n grads = torch.autograd.grad(test_loss, model.parameters())\n\n assert test_loss.item() == 49612.5\n assert len(grads) == 1\n assert torch.all(grads[0].data == torch.tensor([[3780., 8505., 13230.]]))", "def test_param_to_gradient(self):\n pass", "def check_layer_param_gradient(layer, x,\n param_name,\n delta=1e-5, tol=1e-4):\n param = layer.params()[param_name]\n initial_w = param.value\n\n output = layer.forward(x)\n output_weight = np.random.randn(*output.shape)\n\n def helper_func(w):\n param.value = w\n output = layer.forward(x)\n loss = np.sum(output * output_weight)\n d_out = np.ones_like(output) * output_weight\n layer.backward(d_out)\n grad = param.grad\n return loss, grad\n\n return check_gradient(helper_func, initial_w, delta, tol)", "def _evaluate_gradient(self, **variables):\n pass", "def check_model_gradient(model, X, y,\n delta=1e-5, tol=1e-4):\n params = model.params()\n\n for param_key in params:\n print(\"Checking gradient for %s\" % param_key)\n param = params[param_key]\n initial_w = param.value\n\n def helper_func(w):\n param.value = w\n loss = model.compute_loss_and_gradients(X, y)\n grad = param.grad\n return loss, grad\n\n if not check_gradient(helper_func, initial_w, delta, tol):\n return False\n\n return True", "def check_layer_gradient(layer, x, delta=1e-5, tol=1e-4):\n output = layer.forward(x)\n if isinstance(output, list):\n output = output[0]\n output_weight = CP.cp.random.randn(*output.shape)\n\n def helper_func(x):\n output = layer.forward(x)\n if isinstance(output, list):\n 
output = output[0]\n loss = CP.cp.sum(output * output_weight)\n d_out = CP.cp.ones_like(output) * output_weight\n grad = layer.backward(d_out)\n return loss, grad\n\n return check_gradient(helper_func, x, delta, tol)", "def test_gradients_update(self):\n # Reset models.\n self.model.load_state_dict(self.initial_model_dict)\n self.actor_model.load_state_dict(self.initial_actor_model_dict)\n\n # There should be no calculated gradient yet.\n for p in self.model.parameters():\n self.assertIsNone(p.grad)\n for p in self.actor_model.parameters():\n self.assertIsNone(p.grad)\n\n polybeast.learn(*self.learn_args)\n\n # Check that every parameter for the learner model has a gradient, and that\n # there is at least some non-zero gradient for each set of paramaters.\n for p in self.model.parameters():\n self.assertIsNotNone(p.grad)\n self.assertFalse(torch.equal(p.grad, torch.zeros_like(p.grad)))\n\n # Check that the actor model has no gradients associated with it.\n for p in self.actor_model.parameters():\n self.assertIsNone(p.grad)", "def test_gradient_step(var_f, len_f, var_y, N):\n\n x, y = build_data(N)\n\n gp_model = initialise_gp_model(var_f, len_f, var_y, x, y)\n markovgp_model = initialise_markovgp_model(var_f, len_f, var_y, x, y)\n\n gv = objax.GradValues(gp_model.energy, gp_model.vars())\n gv_markov = objax.GradValues(markovgp_model.energy, markovgp_model.vars())\n\n lr_adam = 0.1\n lr_newton = 1.\n opt = objax.optimizer.Adam(gp_model.vars())\n opt_markov = objax.optimizer.Adam(markovgp_model.vars())\n\n gp_model.update_posterior()\n gp_grads, gp_value = gv()\n gp_loss_ = gp_value[0]\n opt(lr_adam, gp_grads)\n gp_hypers = np.array([gp_model.kernel.lengthscale, gp_model.kernel.variance, gp_model.likelihood.variance])\n print(gp_hypers)\n print(gp_grads)\n\n markovgp_model.update_posterior()\n markovgp_grads, markovgp_value = gv_markov()\n markovgp_loss_ = markovgp_value[0]\n opt_markov(lr_adam, markovgp_grads)\n markovgp_hypers = np.array([markovgp_model.kernel.lengthscale, markovgp_model.kernel.variance,\n markovgp_model.likelihood.variance])\n print(markovgp_hypers)\n print(markovgp_grads)\n\n np.testing.assert_allclose(gp_grads[0], markovgp_grads[0], rtol=1e-4)\n np.testing.assert_allclose(gp_grads[1], markovgp_grads[1], rtol=1e-4)\n np.testing.assert_allclose(gp_grads[2], markovgp_grads[2], rtol=1e-4)", "def test_gradients_check(self):\n model = PoincareModel(self.data, negative=3)\n try:\n model.train(epochs=1, batch_size=1, check_gradients_every=1)\n except Exception as e:\n self.fail('Exception %s raised unexpectedly while training with gradient checking' % repr(e))", "def check_layer_param_gradient(layer, x,\n param_name,\n delta=1e-5, tol=1e-4):\n param = layer.params()[param_name]\n initial_w = param.value\n\n layer.clear_grads()\n output = layer.forward(x)\n if isinstance(output, list):\n output = output[0]\n output_weight = CP.cp.random.randn(*output.shape)\n\n def helper_func(w):\n param.value = w\n layer.clear_grads()\n output = layer.forward(x)\n if isinstance(output, list):\n output = output[0]\n loss = CP.cp.sum(output * output_weight)\n d_out = CP.cp.ones_like(output) * output_weight\n layer.backward(d_out)\n grad = param.grad\n return loss, grad\n\n return check_gradient(helper_func, initial_w, delta, tol)", "def gradient_check(op, *args, **kwargs):\n\n if( not 'id_list' in kwargs.keys() ):\n kwargs.update({\"id_list\":[0]})\n\n id_list = kwargs.get(\"id_list\", [0])\n\n for i in id_list:\n\n if(not isinstance(args[i], Variable)):\n raise Exception(\"input {:g} is not a 
variable\".format(i))\n\n if(isinstance(args[i], Variable) and not args[i].requires_grad):\n raise Exception(\"input {:g} doesn't require gradient\".format(i))\n\n nelems = args[i].numel()\n\n \"\"\" numerical gradient \"\"\"\n\n wrapper, p = numdiff_wrapper(op, args, kwargs, i)\n jacobian_numerical = numdiff_unified(wrapper, p)\n\n \"\"\" analytic gradient \"\"\"\n\n jacobian_analytic = []\n\n if(len(kwargs.keys()) > 1):\n \"\"\"function has dictionary inputs\"\"\"\n f = op(*args, **kwargs)\n else:\n f = op(*args)\n\n output_nelems = f.data.numel()\n\n for k in range(output_nelems):\n\n output_grad = torch.zeros(f.data.size())\n output_grad.view(output_nelems, 1)[k] = 1\n\n f.backward(output_grad, retain_variables=True)\n\n jacobian_analytic.append( np.copy( args[i].grad.data.view( nelems ).numpy() ) )\n\n for params_i in args:\n if(isinstance(params_i, torch.autograd.Variable) and params_i.requires_grad):\n params_i.grad.data.zero_()\n\n jacobian_analytic = np.asarray(jacobian_analytic)\n\n \"\"\"\n compare jacobian_analytic with jacobian_numerical\n \"\"\"\n\n if( np.allclose(jacobian_analytic, jacobian_numerical) ):\n\n print \"gradient is correct\"\n\n else:\n\n rel_error = np.linalg.norm( jacobian_analytic - jacobian_numerical ) / \\\n np.maximum( np.linalg.norm( jacobian_analytic ), np.linalg.norm( jacobian_numerical) )\n\n print 'analytic jacobian :'\n print jacobian_analytic\n\n print 'numerical jacobian :'\n print jacobian_numerical\n\n print 'jacobian difference :'\n print jacobian_analytic - jacobian_numerical\n\n print 'relative error:'\n print rel_error", "def testNestedFunctionGradientCall(self):\n check_numerics_callback.enable_check_numerics()\n\n x = constant_op.constant(1.0 - 1e-8, dtype=dtypes.float32)\n\n @def_function.function\n def asinp1(x):\n # asin()'s gradient overflows at the value close to 1.0.\n return math_ops.asin(x) + 1.0\n\n @def_function.function\n def loss(x):\n return math_ops.square(asinp1(x))\n\n with backprop.GradientTape() as tape:\n tape.watch(x)\n y = loss(x)\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n lambda: self.evaluate(tape.gradient(y, x)))\n self.assertTrue(re.search(r\"gradient\", message))", "def grad_checker(X, y, theta, epsilon=0.01, tolerance=1e-4):\n true_gradient = compute_square_loss_gradient(X, y, theta) #the true gradient\n num_features = theta.shape[0]\n \n e = np.eye(num_features)\n denominator = np.float(2*epsilon)\n numerator = np.array([ compute_square_loss(X_train,y_train,theta+epsilon*e[i]) - compute_square_loss(X_train,y_train,theta-epsilon*e[i]) for i in range(num_features) ] )\n diff = (true_gradient - numerator/denominator)\n \n return (diff.dot(diff) < tolerance)", "def test_gradients(self):\n ex = self._create_example()\n decoder_input_fn = FixedDecoderInputs(\n inputs=tf.convert_to_tensor(\n ex.target, dtype=tf.float32),\n sequence_length=tf.convert_to_tensor(\n ex.target_len, dtype=tf.int32))\n\n model = self.create_model()\n decoder_output = model.encode_decode(\n source=tf.convert_to_tensor(\n ex.source, dtype=tf.float32),\n source_len=tf.convert_to_tensor(\n ex.source_len, dtype=tf.int32),\n decoder_input_fn=decoder_input_fn,\n target_len=tf.convert_to_tensor(\n ex.target_len, dtype=tf.int32))\n\n # Get a loss to optimize\n losses = seq2seq_losses.cross_entropy_sequence_loss(\n logits=decoder_output.logits,\n targets=tf.ones_like(decoder_output.predictions),\n sequence_length=tf.convert_to_tensor(\n ex.target_len, dtype=tf.int32))\n mean_loss = tf.reduce_mean(losses)\n\n optimizer = 
tf.train.AdamOptimizer()\n grads_and_vars = optimizer.compute_gradients(mean_loss)\n train_op = optimizer.apply_gradients(grads_and_vars)\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n _, grads_and_vars_ = sess.run([train_op, grads_and_vars])\n\n for grad, _ in grads_and_vars_:\n self.assertFalse(np.isnan(grad).any())", "def learning_by_penalized_gradient(y, tx, w, gamma, lambda_):\n\n #on test avec Newton\n\n loss,gradient,_ = penalized_logistic_regression(y,tx,w,lambda_)\n\n w = w - gamma*gradient\n return loss, w,gradient", "def testLambertWGradient(self, value, expected):\n x = tf.constant(value, dtype=tf.float64)\n with tf.GradientTape() as g:\n g.watch(x)\n y = tfp.math.lambertw(x)\n\n dy_dx = g.gradient(y, x)\n self.assertAllClose(dy_dx, expected)", "def EvaluateGradient(self, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def check_gradients(model, X, Y, eps=1e-5):\n\n # Import methods from the model\n layers = model.layers\n regularizer = model.regularizer\n propagate_forward = model.propagate_forward\n compute_cost = model.compute_cost\n propagate_backward = model.propagate_backward\n\n # Dirty regularizers such as dropout may yield errors\n assert(regularizer is None)\n for layer in layers:\n assert(not isinstance(layer, Dropout))\n assert(not isinstance(layer, BatchNorm))\n\n # Get params currently stored in the layers (for reset)\n params = roll_params(layers, 'params')\n grads = roll_params(layers, 'grads')\n\n # Perform one iteration on X and Y to compute and store new gradients\n out = propagate_forward(X)\n propagate_backward(out, Y)\n\n # Extract new gradients and roll them into a vector\n param_theta = roll_params(layers, 'params')\n grad_theta = roll_params(layers, 'grads')\n\n # Initialize vector of the same shape for approximated gradients\n num_params = len(param_theta)\n grad_approx = np.zeros(num_params)\n\n # Repeat for each number in the vector\n for i in range(num_params):\n # Use two-sided Taylor approximation which is 2x more precise than one-sided\n # Add epsilon to the number\n # Note: Epsilon higher than 1e-5 likely to produce numeric instability\n theta_plus = np.copy(param_theta)\n theta_plus[i] = theta_plus[i] + eps\n # Calculate new cost\n unroll_params(theta_plus, layers, 'params')\n out_plus = propagate_forward(X, predict=True)\n cost_plus = compute_cost(out_plus, Y)\n\n # Subtract epsilon from the number\n theta_minus = np.copy(param_theta)\n theta_minus[i] = theta_minus[i] - eps\n # Calculate new cost\n unroll_params(theta_minus, layers, 'params')\n out_minus = propagate_forward(X, predict=True)\n cost_minus = compute_cost(out_minus, Y)\n\n # Approximate the gradient, error is eps^2\n grad_approx[i] = (cost_plus - cost_minus) / (2 * eps)\n\n # Reset model params\n unroll_params(params, layers, 'params')\n unroll_params(grads, layers, 'grads')\n\n # Compute relative error\n relative_error = calculate_diff(grad_theta, grad_approx)\n\n return relative_error", "def _test_gradient_against_estimate(self, dtype, random, use_gpu):\n z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)\n q = np.zeros((test_obs, 10)).astype(dtype)\n q[np.arange(0, test_obs), np.random.randint(0, 10, size=test_obs)] = 1\n\n logits = array_ops.placeholder(dtype, name='z')\n sparsemax_op = sparsemax(logits)\n loss_op = sparsemax_loss(logits, sparsemax_op, q)\n\n with self.test_session(use_gpu=use_gpu):\n err = gradient_checker.compute_gradient_error(\n logits, z.shape, loss_op, (test_obs,), 
x_init_value=z, delta=1e-9)\n\n self.assertLess(err, 1e-4)", "def gradient_approximation_test(model: BinaryModel, X: np.ndarray, Y: np.ndarray):\n w_orig = model.w.copy()\n epsilon = 1e-2\n image1 = X[0][:-1].reshape(28,28)\n plt.imshow(image1)\n plt.show()\n for i in range(w_orig.shape[0]):\n orig = model.w[i].copy()\n model.w[i] = orig + epsilon\n logits = model.forward(X)\n cost1 = cross_entropy_loss(Y, logits)\n model.w[i] = orig - epsilon\n logits = model.forward(X)\n cost2 = cross_entropy_loss(Y, logits)\n gradient_approximation = (cost1 - cost2) / (2 * epsilon)\n model.w[i] = orig\n # Actual gradient\n logits = model.forward(X)\n model.backward(X, logits, Y)\n #print_mnist_img(model.grad, i)\n if (i == 0):\n image2 = model.grad[:-1].reshape(28,28)\n plt.imshow(image2)\n plt.show()\n difference = gradient_approximation - model.grad[i, 0]\n \n #assert abs(difference) <= epsilon**2,\\\n if (abs(difference) <= epsilon**2):\n print(f\"Calculated gradient is incorrect. \" \\\n f\"Approximation: {gradient_approximation}, actual gradient at iteration {i}: {model.grad[i, 0]}\\n\" \\\n f\"If this test fails there could be errors in your cross entropy loss function, \" \\\n f\"forward function or backward function\")\n else:\n print(f\"Gradient est. {gradient_approximation}, actual gradient {model.grad[i, 0]}\")", "def learning_by_gradient_descent(y, tx, w, gamma):\n loss = calculate_loss(y,tx,w)\n grad = calculate_gradient(y,tx,w)\n w_new = w - gamma*grad\n #grad is for debugging purpose\n return loss, w_new,grad", "def check_gradient(f, x, delta=1e-5, tol=1e-4):\n\n assert isinstance(x, np.ndarray)\n assert x.dtype == np.float\n \n orig_x = x.copy()\n #print('check_g, orig_x befor',orig_x)\n #print('check_g, x befor',x)\n #print('befor first pass in grad check')\n fx, analytic_grad = f(x)\n #print('after first pass in grad check')\n #print('check_g, orig_x after',orig_x)\n #print('check_g, x.shape',x.shape)\n #print('func',f(x)[0])\n #print('fx=',fx,'analityc_grad=',analytic_grad)\n \n assert np.all(np.isclose(orig_x, x, tol)), \"Functions shouldn't modify input variables\"\n\n assert analytic_grad.shape == x.shape\n #print('analitical grad.shape',analytic_grad.shape)\n analytic_grad = analytic_grad.copy()\n\n # We will go through every dimension of x and compute numeric\n # derivative for it\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n #print('it.shape=',it.shape)\n while not it.finished:\n ix = it.multi_index\n #print('ix',ix)\n #print('x[ix]',x[ix])\n analytic_grad_at_ix = analytic_grad[ix]\n #print('analitical_grad-at_ix',analytic_grad_at_ix)\n orig_x = x.copy()\n #print('orig_x',orig_x)\n #print('x.shape befor delta',x.shape)\n orig_x[ix]+=delta\n #print('x.shape after delta',x.shape)\n #print('orig_x[ix] delta +',orig_x[ix])\n fx_plus=f(orig_x)[0]\n #fx_plus=fx_plus_full[ix[0]]\n #print('fx__plus',fx_plus)\n orig_x = x.copy()\n orig_x[ix]-=delta\n #print('orig_x[ix] delta -',orig_x[ix])\n fx_minus=f(orig_x)[0]\n #print('fx_minus',fx_minus)\n \n divider=2*delta\n #print('divider',divider)\n #numeric_grad_at_ix = np.divide((fx_plus-fx_minus),divider)\n numeric_grad_at_ix = (fx_plus-fx_minus)/divider\n #print('numeric_grad_at_ix',numeric_grad_at_ix)\n #print('fx(ix)', fx[ix])\n\n # TODO compute value of numeric gradient of f to idx\n \n if not np.isclose(numeric_grad_at_ix, analytic_grad_at_ix, tol):\n print(\"Gradients are different at %s. 
Analytic: %2.5f, Numeric: %2.5f\" % (ix, analytic_grad_at_ix, numeric_grad_at_ix))\n return False\n\n it.iternext()\n\n print(\"Gradient check passed!\")\n return True", "def optimization(err_acc, learning_rate):\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n tvars = tf.trainable_variables()\n grads = tf.gradients(err_acc, tvars)\n tg_pairs = [(tf.clip_by_value(k[0], -100, 100), k[1]) for k in zip(grads, tvars) if k[0] is not None]\n train_op = optimizer.apply_gradients(tg_pairs)\n return train_op", "def test_wrong_gradients_raises_assertion(self):\n model = PoincareModel(self.data, negative=3)\n model._loss_grad = Mock(return_value=np.zeros((2 + model.negative, model.size)))\n with self.assertRaises(AssertionError):\n model.train(epochs=1, batch_size=1, check_gradients_every=1)" ]
[ "0.74941343", "0.7425341", "0.7355039", "0.7324828", "0.7287194", "0.72620076", "0.7215311", "0.71104115", "0.71081114", "0.7087239", "0.7085801", "0.70754427", "0.697028", "0.6940578", "0.69372624", "0.68951434", "0.68781227", "0.68604034", "0.68522644", "0.6837919", "0.6836612", "0.6814189", "0.6798799", "0.67407286", "0.66915375", "0.6687449", "0.66737455", "0.6660569", "0.6646179", "0.6632525" ]
0.7573967
0
Swaps elements A and B in a list.
def listSwapElement(lst, indexa, indexb):
    temp = lst[indexa]
    lst[indexa] = lst[indexb]
    lst[indexb] = temp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _swap(mylist, a, b):\n temp = mylist[a]\n mylist[a] = mylist[b]\n mylist[b] = temp", "def swap(lst, a, b):\r\n temp = lst[a]\r\n lst[a] = lst[b]\r\n lst[b] = temp", "def swap(self, index_a:int, index_b:int):\n if not index_a == index_b:\n self.list[index_a], self.list[index_b] = self.list[index_b], self.list[index_a]", "def swap(in_list: List, index1: int, index2: int) -> List:\n\n in_list[index1], in_list[index2] = in_list[index2], in_list[index1] \n\n return in_list", "def swap_elements(i: int, j: int, arr: List[int]) -> None:\n arr[i], arr[j] = arr[j], arr[i]", "def swap(theList, i, j):\n\n temp = theList[i]\n theList[i] = theList[j]\n theList[j] = temp", "def swap(C):\n \n return [c.swap() for c in C]", "def swap(lst: list, index_1: int, index_2: int) -> None:\n lst[index_1], lst[index_2] = lst[index_2], lst[index_1]", "def swap(a, b): #0(3)\r\n temp = numList[a] #0(1)\r\n numList[a] = numList[b] #0(1)\r\n numList[b] = temp #0(1)\r", "def swap(a,b):\n temp = a\n a = b\n b = temp\n return(a,b)", "def __elementSwap(self,\n index1: int,\n index2: int):\n self.__ordered_holder[index1], self.__ordered_holder[index2] = self.__ordered_holder[index2], self.__ordered_holder[index1]", "def swap_one(list_one, list_two):\n i = random.choice(list_one)\n j = random.choice(list_two)\n\n list_one.remove(i)\n list_two.remove(j)\n\n list_one.append(j)\n list_two.append(i)", "def list_swap_i(\n l: list,\n i1: int,\n i2: int,\n ) -> list: \n\n l[i1], l[i2] = l[i2], l[i1]\n\n return l", "def modSwapSort(L):\n print(\"Original L: \", L)\n for i in range(len(L)):\n for j in range(len(L)):\n if L[j] < L[i]:\n # the next line is a short\n # form for swap L[i] and L[j]\n L[j], L[i] = L[i], L[j]\n print(L)\n print(\"Final L: \", L)", "def swap(A, index1, index2):\r\n \r\n temp = A[index1]\r\n A[index1] = A[index2]\r\n A[index2] = temp", "def swapSort(L):\n print(\"Original L: \", L)\n for i in range(len(L)):\n for j in range(i + 1, len(L)):\n if L[j] < L[i]:\n # the next line is a short\n # form for swap L[i] and L[j]\n L[j], L[i] = L[i], L[j]\n print(L)\n print(\"Final L: \", L)", "def reorder_as(A, B):\n C = intersect(B, A) + setdiff(A, B)\n try:\n return type(A)(C)\n except TypeError:\n return list(C)", "def rearrange_array(B: List[int], index: int):\n\toriginal_elem = B[index]\n\tindex += 1\n\tlen_B = len(B)\n\twhile index < len_B and B[index] < original_elem:\n\t\tB[index - 1] = B[index]\n\t\tindex += 1\n\tB[index - 1] = original_elem", "def swap_nodes(self, a, b):\n if a == b:\n return\n if len(self) < 2:\n return\n\n nodeA = nodeB = None\n curr_node = self._header\n\n while curr_node is not None and not (nodeA and nodeB):\n if curr_node._element == a and not nodeA:\n nodeA = curr_node\n elif curr_node._element == b and not nodeB:\n nodeB = curr_node\n curr_node = curr_node._next\n\n if curr_node is None:\n raise Empty(\"Not in list\")\n\n precessorA = nodeA._prev\n successorA = nodeA._next\n precessorB = nodeB._prev\n successorB = nodeB._next\n\n precessorA._next = successorA._prev = nodeB\n precessorB._next = successorB._prev = nodeA\n\n nodeA._prev, nodeB._prev = nodeB._prev, nodeA._prev\n nodeA._next, nodeB._next = nodeB._next, nodeA._next", "def merge(self, A: List[int], m: int, B: List[int], n: int) -> None:\n A[m:] = B\n A.sort()", "def swap(self):\n if self.cnt_swap == 0:\n i = self.swaplist[self.cnt_swap][0]\n j = self.swaplist[self.cnt_swap][1]\n self.b[i], self.b[j] = self.b[j], self.b[i]\n self.f[i], self.f[j] = self.f[j], self.f[i]\n elif self.cnt_swap < self.nb_swaps:\n i = 
self.swaplist[self.cnt_swap - 1][0]\n j = self.swaplist[self.cnt_swap - 1][1]\n self.b[i], self.b[j] = self.b[j], self.b[i]\n self.f[i], self.f[j] = self.f[j], self.f[i]\n i = self.swaplist[self.cnt_swap][0]\n j = self.swaplist[self.cnt_swap][1]\n self.b[i], self.b[j] = self.b[j], self.b[i]\n self.f[i], self.f[j] = self.f[j], self.f[i]\n else:\n return 0\n self.cnt_swap += 1\n return 1", "def swap(i: int, j: int, data: List[int]) -> None:\n temp = data[i]\n data[i] = data[j]\n data[j] = temp", "def swap(arr, first, second):\n arr[first], arr[second] = arr[second], arr[first]", "def swap(values: list, i = int, j = int) -> None:\n \n temp: int = values[i]\n values[i] = values[j]\n values[j] = temp", "def swap(arr, left, right):\n arr[left], arr[right] = arr[right], arr[left]", "def swap(x, i, j):\n if not isinstance(x, type([1, 2])):\n raise TypeError(\"Este método solo se puede hacer con listas\")\n x[i], x[j] = x[j], x[i]", "def swap(arr, i, j):\n arr[i], arr[j] = arr[j], arr[i]", "def swap_numbers(numbers, index1, index2):\n temp = numbers[index1]\n numbers[index1] = numbers[index2]\n numbers[index2] = temp", "def swapBetweenLists(values1, values2):\n\tp1 = randint(0, len(values1)-1)\n\tp2 = randint(0, len(values2)-1)\n\ttmp = values1[p1]\t\n\tvalues1[p1] = values2[p2]\n\tvalues2[p2] = tmp", "def swap(t, i, j):\n t[i], t[j] = t[j], t[i]" ]
[ "0.74426883", "0.72412217", "0.7158518", "0.70567393", "0.67977434", "0.6741206", "0.67259157", "0.6641364", "0.653265", "0.6483142", "0.6444933", "0.64251614", "0.64234024", "0.6352456", "0.632575", "0.6301343", "0.6298016", "0.6295743", "0.6288338", "0.62535304", "0.6230418", "0.62017727", "0.6190003", "0.6171845", "0.61665183", "0.6149033", "0.6130468", "0.6092921", "0.6089489", "0.6058106" ]
0.73260695
1
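Nearly every negative in the row above implements the same list-element swap. For reference, a minimal self-contained sketch of the tuple-unpacking idiom most of them share (all names here are illustrative, not taken from any one negative):

```python
from typing import List

def swap(data: List[int], i: int, j: int) -> None:
    """Exchange the elements at positions i and j in place."""
    # Tuple packing/unpacking avoids the explicit temp variable
    # several of the negatives above use.
    data[i], data[j] = data[j], data[i]

values = [1, 2, 3]
swap(values, 0, 2)
assert values == [3, 2, 1]
```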
Execute a list of plans; this list is returned when solving a task.
def execute_plans(robot, plans):\n # make sure the robot is actually in the home position\n # before executing a plan\n robot.mg.set_joint_value_target(\n plans[0].joint_trajectory.points[0].positions)\n robot.mg.go(wait=True)\n print(\"Moved to home, start executing task.\")\n\n # TODO quick fix, add first point to lin path\n plans[1].joint_trajectory.points.insert(\n 0, plans[0].joint_trajectory.points[-1])\n\n for plan in plans:\n print(\"========================================\")\n print(\"executing plan of length\")\n print(len(plan.joint_trajectory.points))\n print(plan.joint_trajectory.points[0])\n print(plan.joint_trajectory.points[1])\n print(\"\\n...\\n\")\n print(plan.joint_trajectory.points[-1])\n print(\"========================================\")\n # print(plan)\n robot.mg.execute(plan, wait=True)\n rospy.sleep(1.0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def executePlans(update):\n out.header('Executing plans %r\\n' % (update))\n # Finding the functions to call is actually done by a 'iterator' like function in the plangraph module\n while(True):\n # This function either returns None or a tuple just like generate added to it\n p = update.plans.getNextTodo()\n\n # No more to do?\n if(not p):\n break\n\n # Explode tuple otherwise\n func, args = p\n\n # We are in a try-except block so if func isn't callable that will catch it\n try:\n out.verbose('Calling %s\\n' % (func))\n update.progress(\"Calling {}\".format(func.__name__))\n #\n # Call the function from the execution plan\n #\n # args may be empty, but we don't want to pass in a tuple if we don't need to.\n # This below explodes the args so if @args is (), then what is passed is @update\n skipme = func(*((update, ) + args))\n\n except Exception as e:\n out.exception(e, True)\n # plans = str(update.plans)) # Removed because breaks new out.exception call\n out.warn(\"Failed to execute plan %s%s\" % (func.__name__, args))\n update.responses.append({'exception': str(e), 'traceback': traceback.format_exc()})\n update.failure = str(e)\n return True\n\n # The functions we call here can return other functions, if they do\n # these are functions that should be skipped later on (for instance a\n # set* function discovering it didn't change anything, later on we\n # shouldn't call the corresponding reload function)\n if skipme is not None:\n # If the function returned a Deferred, we will drop out of the\n # execution pipeline and resume later.\n if isinstance(skipme, Deferred):\n out.verbose('Function {} returned a Deferred'.format(func))\n return skipme\n\n # These functions can return individual functions to skip, or a\n # list of multiple functions\n elif callable(skipme):\n skipme = [skipme]\n\n for skip in skipme:\n out.warn('Identified a skipped function: %r\\n' % (skip))\n update.plans.registerSkip(skip)\n\n # Now we are done\n return False", "def execute(self, opts):\n if opts.list:\n self.list_plans(opts.output == 'json')\n elif opts.get:\n self.get_plan(opts.name, opts.output == 'json')\n elif opts.remove:\n self.remove_plan(opts.name)\n elif opts.add:\n self.add_plan(opts.name, opts.services, opts.task, opts.description)", "def executePlan(self, plan, wait=True):\n return self.move_group.execute(plan, wait)", "def plans(self, plans):\n\n self._plans = plans", "def __call__(self, *args, **kwargs):\n result_ids = [sy.ID_PROVIDER.pop()]\n\n response = self.request_execute_plan(self.location, result_ids, *args)\n\n return response", "def plans():", "def plans(self):\r\n return pl.Plans(self)", "def execute(self):\n\n self._status = 'Running'\n\n for test_plan in self._test_plans:\n try:\n test_plan.execute()\n except Failure as e:\n self._status = 'Fail'\n self._message = ('The \"{0}\" test plan in the test run \"{1}\" failed with the '\n 'message: \"{2}\"'.format(test_plan.name, self.name, e.msg))\n except FatalError as e:\n self._status = 'Fail'\n self._message = ('The \"{0}\" test plan in the test run \"{1}\" encountered the fatal '\n 'error: \"{2}\"'.format(test_plan.name, self.name, e.msg))\n raise FatalError(self._message)\n\n if self._status == 'Fail':\n raise Failure(self._message)\n\n self._status = 'Pass'", "def execute_trajectory(self):\n self.plan = self.group.plan()\n result = self.group.go(wait=True)\n return result", "def plan_and_execute(self, planning_prob, real=False, T=2500, stack=True, start_idx=0, ignore_resets=False):\n # Initialize variables\n num_success = 
start_idx\n stack_stable = False\n reset_stable = False\n planning_active = True\n\n if self.use_planning_server:\n # Send a reset request to the planning server\n ros_req = planning_prob\n num_steps = len(ros_req.goal_state)\n trimmed_ros_req = deepcopy(ros_req)\n trimmed_ros_req.goal_state = trimmed_ros_req.goal_state[start_idx:]\n self.init_state_client.call(trimmed_ros_req)\n else:\n pddl_problems = planning_prob\n num_steps = len(pddl_problems)\n\n while num_success < num_steps:\n try:\n # PLANNING\n # If using planning server, request a plan from the server using ROS\n if self.use_planning_server:\n query_block = self.pddl_block_lookup[ros_req.goal_state[num_success].name]\n\n # Wait for a valid plan\n plan = []\n saved_world = pb_robot.utils.WorldSaver()\n while len(plan) == 0 and planning_active:\n time.sleep(5)\n print(\"Getting a plan from server...\")\n ros_resp = self.get_plan_client.call()\n if not ros_resp.planning_active:\n print(\"Planning failed on server side.\")\n # If failure happened during stacking, it is a fatal failure\n if (ros_req.goal_state[num_success].stack):\n print(f\"Failed during stacking {query_block}\")\n fatal = True\n # If failure happened during resetting, prompt user to manually reset blocks\n else:\n print(f\"Failed during resetting {query_block}\")\n input(\"Manually reset the blocks and press Enter to continue\")\n if real:\n self._update_block_poses()\n fatal = False\n return False, stack_stable, reset_stable, num_success, fatal\n if self.validate_ros_plan(ros_resp, query_block):\n plan = self.ros_to_task_plan(ros_resp, self.execution_robot, self.pddl_block_lookup)\n\n # Otherwise, plan locally\n else:\n base, blk, pose = pddl_problems[num_success]\n query_block = blk\n\n self._add_text('Planning block placement')\n self.plan()\n saved_world = pb_robot.utils.WorldSaver()\n self.robot.arm.hand.Open()\n \n # Unpack initial conditions\n fixed_objs = self.fixed + [b for b in self.pddl_blocks if b != blk]\n init = self._get_initial_pddl_state()\n goal_terms = []\n if base == self.table:\n blk_pose = pb_robot.vobj.BodyPose(blk, pose)\n if (not stack or num_success >= num_steps/2) and self.alternate_orientations:\n init += [(\"Reset\",)]\n goal_terms.append((\"AtHome\", blk))\n else:\n init += [('Pose', blk, blk_pose),\n ('Supported', blk, blk_pose, self.table, self.table_pose)]\n goal_terms.append(('AtPose', blk, blk_pose))\n goal_terms.append(('On', blk, self.table))\n else:\n init += [('RelPose', blk, base, pose)]\n goal_terms.append(('On', blk, base))\n goal = tuple(['and'] + goal_terms)\n \n # Plan with PDDLStream\n pddl_info = get_pddlstream_info(self.robot,\n fixed_objs,\n self.pddl_blocks,\n add_slanted_grasps=True,\n approach_frame='global',\n use_vision=self.use_vision,\n home_pose=pose)\n plan, cost = pddlstream_plan(pddl_info, init, goal, \n search_sample_ratio=1.0, \n max_time=INF)\n if plan is None:\n print(\"\\nFailed to plan\\n\")\n fatal = False\n return False, stack_stable, reset_stable, num_success, fatal\n saved_world.restore()\n\n print(\"\\nGot plan:\")\n print(plan)\n\n # Once we have a plan, execute it\n obstacles = [f for f in self.fixed if f is not None]\n if not self.use_planning_server:\n self.plan()\n ExecuteActions(plan, real=False, pause=False, wait=False, obstacles=obstacles)\n self.execute()\n ExecuteActions(plan, real=real, pause=True, wait=False, prompt=False, obstacles=obstacles, \n sim_fatal_failure_prob=0.0, sim_recoverable_failure_prob=0.0)\n\n # Manage the moved blocks (add to the set when stacking, remove when 
unstacking)\n desired_pose = query_block.get_base_link_pose()\n if query_block not in self.moved_blocks:\n self.moved_blocks.add(query_block)\n else:\n self.moved_blocks.remove(query_block)\n\n # Check stability\n if not real:\n self.step_simulation(T, vis_frames=False)\n #input('Press enter to check stability.')\n if stack:\n stable = self.check_stability(real, query_block, desired_pose)\n else:\n stable = True # Don't care about stability on reset\n\n if stable == 0.:\n prompt = input('Tower NOT stable. Is this true? [y: Unstable / n: Stable]')\n if prompt == 'n':\n stable = 1.\n #input('Continue?')\n\n # Manage the success status of the plan\n if stable == 0.:\n print(\"Unstable after execution!\")\n return True, stack_stable, reset_stable, num_success, False\n else:\n num_success += 1\n if stack and num_success == num_steps/2:\n print(\"Completed tower stack!\")\n stack_stable = True\n stack = False\n if ignore_resets:\n return True, stack_stable, reset_stable, num_success, False\n elif num_success == num_steps:\n print(\"Completed tower reset!\")\n reset_stable = True\n return True, stack_stable, reset_stable, num_success, False\n\n except ExecutionFailure as e:\n print(\"Planning/execution failed.\")\n print(e)\n saved_world.restore()\n if real:\n self._update_block_poses()\n self.robot.arm.SetJointValues(self.real_arm.convertToList(self.real_arm.joint_angles()))\n self.last_obj_held = e.obj_held\n return False, stack_stable, reset_stable, num_success, e.fatal", "def run(self):\n results = []\n for task in self.tasks:\n results.append(task.run())\n self.tasks = []\n return results", "def plan(self):\n raise NotImplementedError('You must implement the plan() method '\n 'yourself!')", "def request_execute_plan(\n self,\n location: \"sy.workers.BaseWorker\",\n response_ids: List[Union[str, int]],\n *args,\n **kwargs,\n ) -> object:\n plan_name = f\"plan{self.id}\"\n # args, _, _ = hook_args.unwrap_args_from_function(\n # plan_name, args, {}\n # )\n args = [args, response_ids]\n\n command = (\"execute_plan\", self.id_at_location, args, kwargs)\n\n response = self.owner.send_command(\n message=command, recipient=location, return_ids=response_ids\n )\n response = hook_args.hook_response(plan_name, response, wrap_type=FrameworkTensor[0])\n response.garbage_collect_data = False\n return response", "def plans():\n results = []\n if 'qry' in request.args:\n look_for = request.args['qry']\n if look_for[0] == '*':\n look_for = ''\n zipcode = request.args['zipcode']\n\n try:\n plan = request.args['plan']\n except KeyError:\n return None\n\n # If this is a medicaid or private plan\n where = tools.get_location(zipcode)\n if where:\n if plan in ('medicaid', 'private'):\n state = where.STATE\n results = PlanNames.by_state(state, look_for, plan=='medicaid')\n results = [r.plan_name for r in results]\n if state == 'OH':\n results.append('OH State Medicaid')\n elif plan == 'medicare':\n county_code = where.GEO.COUNTY_CODE\n ma_region = where.GEO.MA_REGION_CODE\n pdp_region = where.GEO.PDP_REGION_CODE\n results = Plans.find_in_county(county_code, ma_region, pdp_region, look_for)\n\n return jsonify(sorted(results))", "def plan(self):\n\n self.logger.info(\"*** start planning......\")\n\n request_list = self.dbh.get_requests()\n\n if len(request_list) > 0:\n if not self._handle_requests(request_list):\n self.logger.error(\"while planning\")\n return False\n else:\n self.logger.error(\"while reading plan\")\n return False\n\n return True", "def _plan_workorders(self, replan=False):\n self.ensure_one()\n\n 
if not self.workorder_ids:\n return\n # Schedule all work orders (new ones and those already created)\n qty_to_produce = max(self.product_qty - self.qty_produced, 0)\n qty_to_produce = self.product_uom_id._compute_quantity(qty_to_produce, self.product_id.uom_id)\n start_date = max(self.date_planned_start, datetime.datetime.now())\n if replan:\n workorder_ids = self.workorder_ids.filtered(lambda wo: wo.state in ['ready', 'pending'])\n # We plan the manufacturing order according to its `date_planned_start`, but if\n # `date_planned_start` is in the past, we plan it as soon as possible.\n workorder_ids.leave_id.unlink()\n else:\n workorder_ids = self.workorder_ids.filtered(lambda wo: not wo.date_planned_start)\n for workorder in workorder_ids:\n workcenters = workorder.workcenter_id | workorder.workcenter_id.alternative_workcenter_ids\n\n best_finished_date = datetime.datetime.max\n vals = {}\n for workcenter in workcenters:\n # compute theoretical duration\n if workorder.workcenter_id == workcenter:\n duration_expected = workorder.duration_expected\n else:\n duration_expected = workorder._get_duration_expected(alternative_workcenter=workcenter)\n\n from_date, to_date = workcenter._get_first_available_slot(start_date, duration_expected)\n # If the workcenter is unavailable, try planning on the next one\n if not from_date:\n continue\n # Check if this workcenter is better than the previous ones\n if to_date and to_date < best_finished_date:\n best_start_date = from_date\n best_finished_date = to_date\n best_workcenter = workcenter\n vals = {\n 'workcenter_id': workcenter.id,\n 'duration_expected': duration_expected,\n }\n\n # If none of the workcenter are available, raise\n if best_finished_date == datetime.datetime.max:\n raise UserError(_('Impossible to plan the workorder. 
Please check the workcenter availabilities.'))\n\n # Instantiate start_date for the next workorder planning\n if workorder.next_work_order_id:\n start_date = best_finished_date\n\n # Create leave on chosen workcenter calendar\n leave = self.env['resource.calendar.leaves'].create({\n 'name': workorder.display_name,\n 'calendar_id': best_workcenter.resource_calendar_id.id,\n 'date_from': best_start_date,\n 'date_to': best_finished_date,\n 'resource_id': best_workcenter.resource_id.id,\n 'time_type': 'other'\n })\n vals['leave_id'] = leave.id\n workorder.write(vals)\n self.with_context(force_date=True).write({\n 'date_planned_start': self.workorder_ids[0].date_planned_start,\n 'date_planned_finished': self.workorder_ids[-1].date_planned_finished\n })", "def _execute(self, req: pb2.ExecutePlanRequest) -> None:\n logger.info(\"Execute\")\n\n def handle_response(b: pb2.ExecutePlanResponse) -> None:\n if b.session_id != self._session_id:\n raise SparkConnectException(\n \"Received incorrect session identifier for request: \"\n f\"{b.session_id} != {self._session_id}\"\n )\n\n try:\n if self._use_reattachable_execute:\n # Don't use retryHandler - own retry handling is inside.\n generator = ExecutePlanResponseReattachableIterator(\n req, self._stub, self._retry_policy, self._builder.metadata()\n )\n for b in generator:\n handle_response(b)\n else:\n for attempt in self._retrying():\n with attempt:\n for b in self._stub.ExecutePlan(req, metadata=self._builder.metadata()):\n handle_response(b)\n except Exception as error:\n self._handle_error(error)", "def search_for_plans(start, exits, pig_neighbours, moves, state, actions):\n goals = exits + pig_neighbours\n paths, _ = GamePlanner.astar_multi_search(start=start,\n goals=goals,\n state=state,\n actions=actions)\n plans = GamePlanner.paths_to_plans(paths=paths,\n exits=exits,\n pig_neighbours=pig_neighbours,\n moves=moves)\n return plans", "def execute_plan(conf, plan, grouped_against=None, grouped_by=None):\n\n log = logging.getLogger(__name__)\n try:\n if isinstance(conf, list) or isinstance(conf, tuple):\n sample_conf = conf[0]\n log.info(\"Executing SimulationGroup plan\")\n if not sample_conf['General']['save_as']:\n obj = SimulationGroup([Simulation(simulator=Simulator(c)) for c\n in conf],\n grouped_against=grouped_against,\n grouped_by=grouped_by)\n else:\n obj = SimulationGroup([Simulation(conf=c) for c in conf],\n grouped_against=grouped_against,\n grouped_by=grouped_by)\n ID = str(obj)\n else:\n log.info(\"Executing Simulation plan\")\n if not conf['General']['save_as']:\n obj = Simulation(simulator=Simulator(conf))\n else:\n obj = Simulation(conf=conf)\n # Concatenate the field components in each layer into a single 3D\n # array, but add to blacklist so the concatenated arrays don't get\n # written to disk\n # FIXME: This is grossly memory-inefficient\n for f in ('Ex', 'Ey', 'Ez'):\n ks = ['{}_{}'.format(lname, f) for lname in obj.layers.keys()]\n obj.data[f] = np.concatenate([obj.data[k] for k in ks])\n obj.data.add_to_blacklist(f)\n ID = obj.id[0:10]\n\n for task_name in ('crunch', 'plot'):\n if task_name not in plan:\n continue\n task = plan[task_name]\n log.info(\"Beginning %s for obj %s\", task_name, ID)\n for func, data in task.items():\n if not data['compute']:\n continue\n else:\n argsets = data['args']\n if argsets and isinstance(argsets[0], list):\n for argset in argsets:\n if argset:\n _call_func(func, obj, argset)\n else:\n _call_func(func, obj, [])\n else:\n if argsets:\n _call_func(func, obj, argsets)\n else:\n 
_call_func(func, obj, [])\n log.info(\"Completed %s for obj %s\", task_name, ID)\n log.info(\"Plan execution for obj %s complete\", ID)\n if isinstance(obj, Simulation):\n log.info(\"Saving and clearing data for Simulation %s\", ID)\n if obj.conf['General']['sample_dict']:\n obj.write_data(blacklist=('normE', 'normEsquared', 'Ex', 'Ey',\n 'Ez'))\n else:\n obj.write_data()\n obj.clear_data()\n else:\n log.info(\"Clearing data for SimulationGroup %s\", ID)\n for sim in obj.sims:\n sim.clear_data()\n except Exception as e:\n if isinstance(conf, list) or isinstance(conf, tuple):\n log.error('Group raised exception')\n else:\n log.error('Conf %s raised exception', conf['General']['sim_dir'])\n raise", "def plans(self):\r\n return Plans(self)", "def execute(self):\n return SLAResults(self.execute_votable(), self.getqueryurl())", "def execute(self):\n return SLAResults(self.execute_votable(), self.getqueryurl())", "def list(cls):\n return cls().requests.get('plan')", "def get_plans(self):\n return stripe.Plan.all()", "def plan_list_post(request):\n company = auth_api_key(request)\n form = validate_form(PlanCreateForm, request)\n \n plan_type = form.data['plan_type']\n amount = form.data['amount']\n frequency = form.data['frequency']\n interval = form.data['interval']\n if interval is None:\n interval = 1\n company_guid = company.guid\n\n # TODO: make sure user cannot create a post to a deleted company\n\n model = PlanModel(request.session)\n type_map = dict(\n charge=model.TYPE_CHARGE,\n payout=model.TYPE_PAYOUT,\n )\n plan_type = type_map[plan_type]\n freq_map = dict(\n daily=model.FREQ_DAILY,\n weekly=model.FREQ_WEEKLY,\n monthly=model.FREQ_MONTHLY,\n yearly=model.FREQ_YEARLY,\n )\n frequency = freq_map[frequency]\n\n with db_transaction.manager:\n guid = model.create(\n company_guid=company_guid, \n plan_type=plan_type,\n amount=amount, \n frequency=frequency, \n interval=interval, \n )\n plan = model.get(guid)\n return plan", "def execute(self):\n self.status_message = \"State: Execute - Executing Motion Plan\"\n self.current_state = \"execute\"\n self.next_state = \"idle\"\n collect_info = threading.Thread(\n target=self.write_joint_pos, args=(\"notp_wp\",))\n collect_info.start()\n for wp in self.waypoints:\n # Ensure the correct number of joint angles\n full_wp = [0.0] * self.rexarm.num_joints\n full_wp[0:len(wp)] = wp\n # TODO: Send the waypoints to the trajectory planner and break if estop\n if self.next_state == \"estop\":\n break\n self.rexarm.set_positions(full_wp)\n time.sleep(1.5)", "def list_plans(self, json_output: bool = False):\n plans, errors = self.rest.list_backup_plans()\n _exit_if_errors(errors)\n if json_output:\n print(json.dumps(plans, indent=2))\n else:\n self.human_print_plans(plans)", "def get(self, **kwargs):\n _plans = self._plans.query(**kwargs)\n\n if not _plans:\n raise PlanNotFoundError\n\n return _plans", "def _execute(self):\n return self.warrior.filter_tasks(self.filter_obj)", "def plans(self):\n title = self.context.Title()\n return self.portal_catalog(portal_type='Plan', Subject=title)" ]
[ "0.671764", "0.6583761", "0.6341465", "0.6332959", "0.6236808", "0.6156332", "0.6097737", "0.608058", "0.59968126", "0.59372807", "0.5844701", "0.58349496", "0.58101356", "0.5794742", "0.5718878", "0.5680914", "0.5639629", "0.5627402", "0.5588373", "0.55457616", "0.55041856", "0.55041856", "0.5497411", "0.5466747", "0.5450354", "0.54241216", "0.53702664", "0.5366636", "0.5362386", "0.53590745" ]
0.7731418
1
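The positive document for this row stitches consecutive MoveIt-style trajectories together by prepending the last waypoint of one plan to the next (its "quick fix" comment). A hedged, dependency-free sketch of that step, generalized to every consecutive pair; the `SimpleNamespace` objects stand in for real plan messages:

```python
from types import SimpleNamespace

def stitch(plans):
    """Prepend each plan's final waypoint to the next plan so the
    segments join continuously, as the document's quick fix does."""
    for prev, nxt in zip(plans, plans[1:]):
        nxt.joint_trajectory.points.insert(0, prev.joint_trajectory.points[-1])
    return plans

# Tiny stand-in plans with the attribute shape the document assumes.
a = SimpleNamespace(joint_trajectory=SimpleNamespace(points=[1, 2]))
b = SimpleNamespace(joint_trajectory=SimpleNamespace(points=[3]))
assert stitch([a, b])[1].joint_trajectory.points == [2, 3]
```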
Deletes a business_report from the table with the specified id
def delete_by_id(id: int) -> List: business_report = BusinessNotificationService.get_by_id(id) if not business_report: return [] db.session.delete(business_report) db.session.commit() return [id]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_incident(self, id):\n sql = f\"DELETE FROM incidences WHERE incidences.id ={id}\"\n conn = Db().con\n curr = conn.cursor()\n curr.execute(sql)\n conn.commit()", "def storage_delete_report(self, report_id):\n self._get_queryset(report_id=report_id).delete()", "def delete_business(yelp_id, conn):\n return conn.execute(Business.delete().where(Business.c.yelp_id == yelp_id))", "def delete(self, report_id=None):\n if report_id is not None and isinstance(report_id, str):\n return self.collection.remove({'_id': ObjectId(report_id)})\n else:\n return self.collection.remove({'_id': report_id})", "def delete_report(report_id):\n report = Report.query.get(report_id)\n if report is None:\n flash(\n \"Report not found!\",\n \"alert-warning\",\n )\n elif not current_user.is_admin and not report.user.id == current_user.id:\n flash(\n \"You don't have permission to delete that.\",\n \"alert-warning\",\n )\n else:\n # Before deleting the report, check to see if any other users have\n # favorited this report. If so, simply transfer ownership to them\n current_user.unfavorite(report)\n if report.favorite_users:\n user = report.favorite_users[0]\n report.user = user\n db.session.commit()\n flash(\n \"Report ownership was transferred to {{ user.name }} since \"\n \"the report was in that user's favorites list.\",\n \"alert-success\",\n )\n else:\n db.session.delete(report)\n db.session.commit()\n flash(\n \"Report deleted\",\n \"alert-success\",\n )\n return redirect(request.args.get('next') or url_for('reports.my_reports'))", "def delete_by_id(cls, id):\n\t\tbook = Book.query.get(id)\n\t\tdb.session.delete(book)\n\t\tdb.session.commit()", "def delete(self, id):\n\n query = \"DELETE FROM {} WHERE id = {}\".format(self.table, id)\n\n self.remove(query)\n return True", "def delete(self, _id):\n self._db[_id].delete()", "def storage_delete_report_file(self, report_pk):\n self._get_queryset(pk=report_pk).delete()", "def delete_specific_incident(self, incident_id):\n self.cursor.execute(\"\"\"DELETE FROM incidents WHERE incident_id ='%s' AND status='draft'\n \"\"\" %(incident_id))\n self.commiting()\n return incident_id", "def delete(self, bill_id):\n bill = BillModel.find_by_id(bill_id)\n if bill:\n bill.delete_from_db()\n\n return {'message': 'Bill deleted'}", "def delete(self, id):\n self.cursor.execute(\"DELETE FROM Book WHERE Id = ?\", (id,))\n self.connection.commit()", "def delete_business(current_user, businessId):\n business = Business.query.get(int(businessId))\n\n if not (business and business.user_id == current_user.id):\n return make_json_reply(\n 'message',\n 'Business id might not exist or you have no right to delete business'\n ), 404\n\n business_name = business.name\n db.session.delete(business)\n\n return make_json_reply(\n 'message', 'Successfully deleted business ' + str(business_name)), 200", "def delete_record(self, id_: str) -> None:\n instance = self._get(id_)\n self._delete_from_db(instance)", "def delete(self, _id):", "def _delete (self):\n self._exec ('delete from table_name where id=%(id)s')", "def delete_item_by_id(self, id):\n response = self.table_connector.delete_item(Key={self.primary_key: id})\n print(response)", "def delete(self, id):\n empleadoeliminar = EmployeeModel.query.filter_by(employee_id=id).first()\n if empleadoeliminar:\n db.session.delete(empleadoeliminar)\n db.session.commit()\n return 201\n api.abort(404)", "def delete_reports(\n self,\n report_root_id, # type: str\n if_match=None, # type: Optional[str]\n **kwargs # type: Any\n ):\n # type: (...) 
-> None\n cls = kwargs.pop('cls', None) # type: ClsType[None]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.delete_reports.metadata['url'] # type: ignore\n path_format_arguments = {\n 'reportRoot-id': self._serialize.url(\"report_root_id\", report_root_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n if if_match is not None:\n header_parameters['If-Match'] = self._serialize.header(\"if_match\", if_match, 'str')\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.delete(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [204]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n if cls:\n return cls(pipeline_response, None, {})", "def delete(self):\n query = \"DELETE FROM \" + self.table + \" WHERE \" + self.idfield + \"=%s\"\n dbh = dbstuff.getRW(self.dbh_key)\n try:\n c = dbh.cursor()\n c.execute(query, self.id)\n c.close()\n dbh.commit()\n finally:\n dbstuff.release(dbh,self.dbh_key)", "def delete(id_: int):\n logger.debug('Deleting employee with id %i.', id_)\n try:\n delete_employee = Employee.query.get(id_)\n db.session.delete(delete_employee)\n except Exception as exception:\n logger.error('An error occurred while deleting employee with id %i. '\n 'Exception: %s', id_, str(exception))\n db.session.rollback()\n raise\n db.session.commit()\n logger.info('Successfully deleted employee with id %i.', id_)", "def delete(self, id):\n raise NotImplementedError", "def delete_bug(self,id):\n self.execute(TABELLE['bugs']['delete'],(id,))", "def booking_delete(id):\n booking = Booking.query.get(id)\n payment = Payment.query.filter_by(booking_id=id).first()\n if not booking:\n return \"DELETED\"\n db.session.delete(booking)\n db.session.delete(payment)\n db.session.commit()\n return redirect(url_for('bookings.booking_index'))", "def delete(self, id):\n try:\n self.gridfs.delete(ObjectId(id))\n except Exception, e:\n print e\n raise e", "def remove(table, id_):\n\n entry_index = 0\n for entry in table:\n entry_id_ = entry[0]\n if entry_id_ == id_:\n del table[entry_index]\n entry_index += 1\n data_manager.write_table_to_file(\"model/sales/sales.csv\", table)\n return table", "def delete(self,id):\r\n return delete(id=id)", "def delete_record(self, record_id):\r\n self.record.deleteObject(id=record_id)", "def delete(self, id):\n return self.app.post('/delete/' + str(id), data=dict(id=id),\n follow_redirects=True)", "def delete_entry(self, scenario_id):\n sql = self.delete(\"id\")\n self.cur.execute(sql, (scenario_id,))" ]
[ "0.6651148", "0.6608701", "0.64136994", "0.63094574", "0.6274263", "0.61082935", "0.60701895", "0.60681254", "0.6067816", "0.6066621", "0.60407543", "0.6026496", "0.6005154", "0.59901506", "0.59645677", "0.5961449", "0.5950091", "0.5949197", "0.59486943", "0.5945033", "0.59290916", "0.5906257", "0.58889955", "0.58504474", "0.5835041", "0.5820932", "0.5807622", "0.5803505", "0.57921994", "0.5790029" ]
0.69790393
0
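The positive document follows the common Flask-SQLAlchemy delete-and-commit shape: look the row up, bail out if it is missing, otherwise delete, commit, and report the removed id. A minimal self-contained sketch of that pattern; the session and lookup objects below are illustrative stand-ins, not a real ORM:

```python
class FakeSession:
    """Stand-in for db.session; records calls instead of touching a DB."""
    def __init__(self):
        self.deleted, self.committed = [], False
    def delete(self, obj):
        self.deleted.append(obj)
    def commit(self):
        self.committed = True

def delete_by_id(session, lookup, id):
    report = lookup(id)  # stand-in for a get_by_id service call
    if not report:
        return []        # nothing to delete
    session.delete(report)
    session.commit()
    return [id]

s = FakeSession()
assert delete_by_id(s, lambda i: {"id": i}, 7) == [7] and s.committed
```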
uses list1 as the reference, returns list of items not in list2
def list_difference(list1, list2): diff_list = [] for item in list1: if not item in list2: diff_list.append(item) return diff_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_l2_from_l1(l1, l2):\r\n return [element for element in l1 if element not in l2]", "def list_difference(l1: List[Any], l2: List[Any]) -> List[Any]:\n return [item for item in l1 if item not in l2]", "def list_difference(list1, list2):\r\n diff_list = []\r\n for item in list1:\r\n if not item in list2:\r\n diff_list.append(item)\r\n else:\r\n if list2.count(item) != list1.count(item) and not item in diff_list:\r\n diff_list.append(item) \r\n return diff_list", "def listops_difference(list_a,list_b):\r\n\r\n retlist = []\r\n for item in list_a:\r\n if item not in list_b:\r\n retlist.append(item)\r\n\r\n # ensure that a duplicated item in list_a is only listed once\r\n return listops_uniq(retlist)", "def subtraction_list(a , b):\n\n c = [i for i in list_a if i not in list_b]\n\n return(c)", "def filterout(L1, L2):\n for i in L1:\n if i in L2:\n L2.remove(i)", "def list_subtract(a, b):\n a_only = list(a)\n for x in b:\n if x in a_only:\n a_only.remove(x)\n return a_only", "def listSubtract(alist,blist):\n result = []\n for item in alist:\n if item not in blist:\n result.append(item)\n return result", "def get_list_diff(list1, list2):\n\n list3 = list(np.setdiff1d(list1,list2))\n return(list3)", "def difference(a, b):\n return list(filterfalse(lambda x: x in b, a))", "def remove_repeats(list1: List[int], list2: List[int]) -> List[int]:\n result = []\n for num in list2:\n if num not in list1:\n result.append(num)\n \n return result", "def set_difference(lst1, lst2):\n elements = []\n indicies = []\n for indx, item in enumerate(lst1):\n if item not in lst2:\n elements.append(item)\n indicies.append(indx)\n return elements, indicies", "def intersect(list1, list2):\n result_list = []\n #list3 = remove_duplicates(list1)\n for dummy_element in list1:\n if list2.count(dummy_element) > 0 and result_list.count(dummy_element) == 0:\n result_list.append(dummy_element)\n return result_list", "def get_contained(list1, list2):\n return [x for x in list1 for y in list2 if x == y]", "def compare_lists(self, list1, list2):\n matching_items = []\n\n list1 = list1.copy()\n list2 = list2.copy()\n\n for item in list1:\n if item in list2:\n matching_items.append(item)\n\n for m in matching_items:\n for c in range(list1.count(m)):\n list1.remove(m)\n for c in range(list2.count(m)):\n list2.remove(m)\n if list1 or list2:\n tmp_match = False\n else:\n tmp_match = True\n return tmp_match, list1, list2", "def difference(list1, list2):\n new_list = []\n for rule1 in list1:\n in_list2 = False\n literals1 = [x.string() for x in rule1]\n for rule2 in list2:\n literals2 = [x.string() for x in rule2]\n if literals1 == literals2:\n in_list2 = True\n if not in_list2:\n new_list.append(rule1)\n return new_list", "def excluded_combos(list_1: list[str], list_2: list[str]) -> list[tuple[str, str]]:\n all_combos: list[tuple[str, str]] = []\n for item_1 in list_1:\n for item_2 in list_2:\n all_combos.append((item_1, item_2))\n return [item for item in all_combos if item not in set(representative_combos(list_1, list_2))]", "def drop_matches(list1, list2):\n list1.sort()\n list2.sort()\n matches = []\n i = j = 0\n lenLst1 = len(list1)\n lenLst2 = len(list2)\n while i < lenLst1 and j < lenLst2:\n if list1[i] < list2[j]:\n matches.append(list1[i])\n i+=1\n elif list1[i] > list2[j]:\n matches.append(list2[j])\n j+=1\n else: #they are the same\n i+=1\n j+=1\n while i < lenLst1:\n matches.append(list1[i])\n i+=1\n while j < lenLst2:\n matches.append(list2[j])\n j+=1\n return len(matches), matches", "def diff(xs, ys):\n return [x 
for x in xs if x not in ys]", "def get_missing_keys(first_list, second_list):\n missing = []\n for key in first_list:\n if key not in second_list:\n missing.append(key)\n return missing", "def list_update(l1, l2):\n return filter(lambda e : e not in l2, l1) + list(l2)", "def entries_not_in(self, other):\n other_keys = set(other._entries.keys())\n filtered_order = [k for k in self._order if k not in other_keys]\n return [self._entries[k] for k in filtered_order]", "def _subtract_access_lists(self, list_a, list_b):\n sub_tuples_list = [{\"to\": s.get('access_to'),\n \"type\": s.get('access_type'),\n \"level\": s.get('access_level')}\n for s in list_b]\n return [r for r in list_a if (\n {\"to\": r.get(\"access_to\"),\n \"type\": r.get(\"access_type\"),\n \"level\": r.get(\"access_level\")} not in sub_tuples_list)]", "def get_list_difference(self, set_one, set_two):\n s1 = set(set_one)\n s2 = set(set_two)\n return list(s1.difference(s2))", "def unique_list(\n l1: list,\n l2: list,\n ) -> list:\n\n l = list((set(l1) | set(l2)) - (set(l1) & set(l2)))\n\n return l", "def difference(self, other: list) -> 'List':\n if not isinstance(other, list):\n raise ValueError('The comparing element is not a list')\n\n return List(item for item in self if item not in other)", "def difference(a, b):\r\n c = [i for i in a + b if i not in a or i not in b]\r\n return c", "def difference(a, b):\r\n return list(set(b).difference(set(a)))", "def list_intersect(l1: List[Any], l2: List[Any]) -> List[Any]:\n return [item for item in l1 if item in l2]", "def compare(lst1, lst2):\n bonnie = []\n for item in lst1:\n if item in lst2:\n bonnie.append(item)\n return bonnie" ]
[ "0.79822516", "0.7981959", "0.77709067", "0.7688654", "0.7456978", "0.74536794", "0.73608845", "0.7324599", "0.72618556", "0.7249769", "0.7235629", "0.7190773", "0.71434844", "0.70034856", "0.6966538", "0.6920006", "0.69053674", "0.6897035", "0.6892836", "0.68181765", "0.6813268", "0.68063015", "0.6804098", "0.67796195", "0.6757042", "0.67341363", "0.66964924", "0.6685938", "0.66181815", "0.6616749" ]
0.8260505
0
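The positive document is O(len(list1) * len(list2)) because each `in` test scans list2. For contrast, a linear-time variant that preserves list1's order; it only applies when list2's elements are hashable:

```python
def list_difference(list1, list2):
    exclude = set(list2)  # hash lookups instead of repeated list scans
    return [item for item in list1 if item not in exclude]

assert list_difference([1, 2, 3, 2], [2]) == [1, 3]
```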
Create a new SDGraphObjectFrame instance in the specified graph
def sNew(sdGraph): outSDGraphObjectFrame = ctypes.c_void_p() _res = sd.getContext().SDGraphObjectFrame_sNew(sdGraph.mHandle, ctypes.byref(outSDGraphObjectFrame)) if _res != SDApiError.NoError.value: if _res == SDApiError.NoErrorOutputParamNotSet.value: return None raise APIException(SDApiError(_res)) constructor = sd.getContext().mTypeMap[SDAPIObject(sd.getContext(), outSDGraphObjectFrame, ownHandle=False).getClassName()] return constructor(sd.getContext(), outSDGraphObjectFrame.value, ownHandle=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def graph_with_graph(cls, graph):\n new = cls()\n new.nx_graph = graph.nx_graph.copy()\n new.max_date = graph.max_date\n new.min_date = graph.min_date\n return new", "def __init__(self, graph=None): # noqa: E501\n self.openapi_types = {\n 'graph': Neo4jGraphGraph\n }\n\n self.attribute_map = {\n 'graph': 'graph'\n }\n\n self._graph = graph", "def new_graph(self, obj, *args, **kwargs):\n self._executor.new_graph(obj, *args, *(kwargs.values()))", "def load_graph(cls, graph):\n if isinstance(graph, tuple):\n nodes, edges = graph\n df = cls(nodes)\n df.is_graph = True\n df.edges = edges\n return df\n raise ValueError(\n \"Expected value for graph - (nodes[cuDF], edges[cuDF])\"\n )", "def __init__(self, graph: ghidra.graph.GImplicitDirectedGraph):\n ...", "def __init__(self, graph: SceneGraph) -> None:\n self.graph: SceneGraph = graph", "def from_dict(cls, graph: Dict[str, Any], name: str = 'UDS') -> 'UDSGraph':\n return cls(adjacency_graph(graph), name)", "def __init__(self, graph_dict=None):\n if graph_dict == None:\n graph_dict = {}\n self.__graph_dict = graph_dict", "def CreateGraph(meta_graph):\n LogMetaGraph(meta_graph)\n ExportMetaGraph(meta_graph)\n CreateGraphCC(_stringify_proto(meta_graph))\n LogOptimizedGraph(meta_graph)", "def __init__(self, graph_dict=None):\r\n if graph_dict == None:\r\n graph_dict = {}\r\n self.__graph_dict = graph_dict", "def __init__(self, graph_dict=None):\n if graph_dict == None:\n graph_dict = {}\n self.graph_dict = graph_dict", "def _construct_graph(self):\n raise NotImplementedError", "def __init__(self):\n self._graph = DirectedGraph()\n self._graph_copies = []", "def __init__(self, graph_path):\n self.graph = tf.Graph()\n graph_path = Path(graph_path)\n if not graph_path.exists():\n raise FileNotFoundError(\"The graph path not exists.\")\n\n graph_def = tf.compat.v1.GraphDef()\n loaded = graph_def.ParseFromString(open(graph_path, 'rb').read())\n\n if graph_def is None:\n raise RuntimeError('Graph load failed.')\n\n with self.graph.as_default():\n tf.import_graph_def(graph_def, name='')\n\n config = tf.ConfigProto()\n # config.gpu_options.visible_device_list = '1'\n config.gpu_options.allow_growth = True\n\n self.sess = tf.Session(graph=self.graph, config=config)", "def __init__(self, graph=None):\n\n self.graph = graph if graph else nx.Graph()", "def CreateGraph(graph_def):\n option = GetGlobalOptions()\n LogMetaGraph(graph_def)\n ExportMetaGraph(graph_def)\n return _C.CreateGraph(\n _stringify_proto(graph_def),\n option['log_optimized_graph'],\n )", "def __init__(self, graph_a: _C.Graph, graph_b: _C.Graph):\n self.graph_a = graph_a\n self.graph_b = graph_b", "def create(data, schema=None, tc=implicit):\n from sparktk.frame.frame import Frame\n return Frame(tc, data, schema)", "def __init__(self, graph: ghidra.graph.GImplicitDirectedGraph, metric: ghidra.graph.GEdgeWeightMetric):\n ...", "def create_graph():\n # Creates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(sys.argv[1], 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def __init__(self, graph_dict=None):\n self.Time = 0\n self.scc_field = {}\n\n if graph_dict == None:\n graph_dict = {}\n self.__graph_dict = graph_dict", "def _create_pgframe(nodes=None, edges=None):\n pass", "def __init__(self, graph: GraphTraversalSource, directed: bool = True):\n self._g = graph", "def create_graph():\n # Creates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(modelFullPath, 'rb') as f:\n graph_def = 
tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def create_graph():\n with gfile.FastGFile(os.path.join(\n FLAGS.model_dir, FLAGS.model_name), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def create(graph, verbose=True):\n if not isinstance(graph, _SGraph):\n raise TypeError('graph input must be a SGraph object.')\n\n with QuietProgress(verbose):\n params = _tc.extensions._toolkits.graph.connected_components.create(\n {'graph': graph.__proxy__})\n return ConnectedComponentsModel(params['model'])", "def __init__(self, args):\n (vertices, edges) = args\n graph = Graph()\n graph.add_vertices(vertices)\n graph.add_edges(edges)\n self._graph = graph\n from sage_semigroups.categories.finite_left_regular_bands import FiniteLeftRegularBands\n Parent.__init__(self, category=FiniteLeftRegularBands().FinitelyGenerated())", "def __init__(self, handle, config=None, object_id=None, graphscope_session=None):\n handle = self.decode_arg(handle)\n config = self.decode_arg(config)\n\n if config is None:\n if \"config\" in handle:\n config = handle[\"config\"]\n if config is None:\n config = collections.defaultdict(lambda: dict)\n\n if object_id is None:\n object_id = handle[\"vineyard_id\"]\n\n self.handle = handle\n self.config = config\n self.object_id = object_id\n self.closed = False\n self.graphscope_session = graphscope_session\n super(Graph, self).__init__()\n\n self.vineyard(handle, config[\"nodes\"], config[\"edges\"])\n for label, node_attr in config[\"node_attributes\"].items():\n n_ints, n_floats, n_strings = (\n node_attr[1][0],\n node_attr[1][1],\n node_attr[1][2],\n )\n self.node_attributes(label, node_attr[0], n_ints, n_floats, n_strings)\n for label, edge_attr in config[\"edge_attributes\"].items():\n n_ints, n_floats, n_strings = (\n edge_attr[1][0],\n edge_attr[1][1],\n edge_attr[1][2],\n )\n self.edge_attributes(label, edge_attr[0], n_ints, n_floats, n_strings)\n\n for node_view_label, node_label, nsplit, split_range in config[\"gen_labels\"]:\n self.node_view(\n node_view_label, node_label, nsplit=nsplit, split_range=split_range\n )\n\n self.init_vineyard(worker_index=0, worker_count=1)", "def __init__(self, meta_graph_path, graph=None):\n\n # 检查tf版本\n if (\n (LooseVersion(tf.__version__) < LooseVersion('1.3.0')) \n or (LooseVersion(tf.__version__) > LooseVersion('1.3.1'))\n ):\n err_string = 'you are using tensorflow version ' + tf.__version__ + ' but only versions 1.3.0 to 1.3.1 are supported'\n raise NotImplementedError(err_string)\n # check exist\n if graph is None:\n self.graph = tf.Graph()\n else:\n self.graph = graph\n self.saver = None\n self.eval_pred_fn = None\n self._restore_graph(meta_graph_path)", "def __init__(self, graph_format='png'):\n\n self.graph = Digraph(\"Dependencies\", format=graph_format, filename='dependency_graph.gv', node_attr={'color': 'lightblue2', 'style': 'filled'})\n self.graph.attr(size='8000,8000')" ]
[ "0.6099084", "0.595656", "0.5864762", "0.5828153", "0.5798196", "0.57338095", "0.5618525", "0.5582457", "0.5571058", "0.5561344", "0.5512142", "0.54885", "0.5407223", "0.53947866", "0.53532344", "0.5335441", "0.52873886", "0.52579653", "0.52570146", "0.5252456", "0.52443326", "0.5227214", "0.5222094", "0.5207456", "0.52066284", "0.51957685", "0.5179104", "0.5164052", "0.5162166", "0.51483387" ]
0.70377195
0
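This row and the SDGraphObjectFrame rows that follow all wrap C calls through ctypes with the same status-code protocol: call a bound function that fills an out-parameter, then map its integer result to a return value, a None, or an exception. A self-contained illustration of that call pattern; the enum values and `fake_api_call` here are stand-ins, not the real Substance Designer API:

```python
import ctypes
from enum import Enum

class SDApiError(Enum):
    NoError = 0
    NoErrorOutputParamNotSet = 1

def fake_api_call(out):
    """Stand-in for a ctypes-bound C function that fills an out-param."""
    out.value = 42
    return SDApiError.NoError.value

def get_value():
    out = ctypes.c_int()
    res = fake_api_call(out)
    if res != SDApiError.NoError.value:
        if res == SDApiError.NoErrorOutputParamNotSet.value:
            return None  # call succeeded but produced no value
        raise RuntimeError(SDApiError(res))
    return out.value

assert get_value() == 42
```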
Get the SDGraphObjectFrame title
def getTitle(self): outValue = ctypes.c_char_p() _res = self.mAPIContext.SDGraphObjectFrame_getTitle(self.mHandle, ctypes.byref(outValue)) if _res != SDApiError.NoError.value: if _res == SDApiError.NoErrorOutputParamNotSet.value: return None raise APIException(SDApiError(_res)) return outValue.value.decode('utf-8')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def title(self):\n return self._frame._title", "def get_title(self):\n return self.metadata['title']", "def get_title():", "def title(self):\n return self.container['title']", "async def title(self):\n if not hasattr(self, \"_title\"):\n self._title = await Stack.fetch_stack_value(self, \"http://purl.org/dc/terms/title\", await self.uuid)\n return self._title", "def get_title(self, obj):\n title = obj.habit.title\n return title", "def get_title(self):\n return self.run_command('get_title')[0]", "def title(self):\n return self.metadata.get('title')", "def title(self):\n for shape in self.__shapes:\n if shape._is_title:\n return shape\n return None", "def get_title(self):\n return self._title", "def get_title(self):\n return self._title", "def get_title(self):\n return self._title", "def get_title(self):\n return self.title", "def get_title(self):\n return self.title", "def get_title(self):\n return self.title", "def title(self):\n return self.run_command('title')[0]", "def title(self):\n return self.header", "def get_title(self) -> str:\n pass", "def getTitle(self): #$NON-NLS-1$\r", "def getTitle(self): #$NON-NLS-1$\r", "def get_window_title(self): # real signature unknown; restored from __doc__\n return \"\"", "def get_title(self):\n return self._get_title_()", "def getTitle(self):\n return self._title", "def title(self):\n\n return self._title", "def title(self) -> str:\n\t\t# pylint: disable=unsubscriptable-object\n\t\treturn self.value[1]", "def title(self):\n return self['title']", "def get_title(self):\n title = self.driver.title\n return title", "def getTitle(self):\n return self.__title__", "def get_title(self):\n\n return self.title", "def get_title(self):\n meta = self.get_meta_data()\n if \"og:title\" in meta:\n return meta[\"og:title\"]\n else:\n soup = BeautifulSoup(self.TARGET_DATA)\n title = soup.find('title')\n if title:\n return title.text\n else:\n return \"No Title\"" ]
[ "0.7783941", "0.7328815", "0.726359", "0.72170377", "0.7165904", "0.71523243", "0.7066919", "0.70472175", "0.7046419", "0.7003007", "0.7003007", "0.7003007", "0.70027757", "0.70027757", "0.70027757", "0.69826674", "0.69338393", "0.69271576", "0.68917066", "0.68917066", "0.68913853", "0.6877599", "0.6877196", "0.68686455", "0.68606764", "0.68598837", "0.68579084", "0.6853584", "0.68527716", "0.68282515" ]
0.80401707
0
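The getTitle wrapper above receives the title through a `ctypes.c_char_p` out-parameter and decodes it to a Python string; the decode step in isolation:

```python
import ctypes

raw = ctypes.c_char_p("Frame title".encode("utf-8"))
assert raw.value.decode("utf-8") == "Frame title"
```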
Get the SDGraphObjectFrame color
def getColor(self): outValue = ColorRGBA() _res = self.mAPIContext.SDGraphObjectFrame_getColor(self.mHandle, ctypes.byref(outValue)) if _res != SDApiError.NoError.value: if _res == SDApiError.NoErrorOutputParamNotSet.value: return None raise APIException(SDApiError(_res)) return outValue
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_graph_color ( self, object ):\n return self.graph_color_", "def _get_color(self):\n return self.__color", "def _get_color(self):\n return self.__color", "def _get_color(self):\n return self.__color", "def _get_color(self):\n return self.__color", "def getColor(self):\r\n return self.color", "def get_color(self):\r\n return self.__color", "def get_color(self):\r\n return self._color", "def get_color(self):\n return self.color", "def color(self):\n return self['color']", "def getColor(self):\n return self.color", "def color(self):\n return self.container['color']", "def getColor(self):\n return self._l[2]", "def get_color(self):\n return self._color", "def get_color(self):\n return self._color", "def getColor(self):\n return self.__color", "def getColor(self):\n return self.__color", "def getColor(self):\n return self.__color", "def fl_get_object_color(ptr_flobject):\n _fl_get_object_color = library.cfuncproto(\n library.load_so_libforms(), \"fl_get_object_color\", \\\n None, [cty.POINTER(xfdata.FL_OBJECT), cty.POINTER(xfdata.FL_COLOR),\n cty.POINTER(xfdata.FL_COLOR)], \\\n \"\"\"void fl_get_object_color(FL_OBJECT * ob, FL_COLOR * col1,\n FL_COLOR * col2)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n ul_fgcolr, ptr_fgcolr = library.make_FL_COLOR_and_pointer()\n ul_bgcolr, ptr_bgcolr = library.make_FL_COLOR_and_pointer()\n library.keep_elem_refs(ptr_flobject, ul_fgcolr, ptr_fgcolr, \\\n ul_bgcolr, ptr_bgcolr)\n _fl_get_object_color(ptr_flobject, ptr_fgcolr, ptr_bgcolr)\n return ul_fgcolr.value, ul_bgcolr.value", "def get_color(self):\n\n return self.color", "def get_color(self):\n return self._io.last_state['color']['front-center']", "def color(self):\n if \"color\" in self._prop_dict:\n return self._prop_dict[\"color\"]\n else:\n return None", "def color(self):\n return self.__color", "def color(self):\n return self._color", "def color(self):\n return self._color", "def color(self):\n return self.COLOR", "def get_color(self):\n\n return self._color", "def get_colour(self):\n return self.colour", "def get_color(self, _pos):\n return self.__framebuffer[_pos]", "def get_color(self):\n return COLOR_DICT[self.element]" ]
[ "0.76436764", "0.713944", "0.713944", "0.713944", "0.713944", "0.7060627", "0.7057942", "0.70264846", "0.7010687", "0.6968809", "0.6927457", "0.69249076", "0.68727857", "0.6870275", "0.6870275", "0.68354243", "0.68354243", "0.68354243", "0.6835348", "0.6824584", "0.6767909", "0.6732262", "0.6724823", "0.66939515", "0.66939515", "0.6675867", "0.6655846", "0.66197914", "0.6602178", "0.65688884" ]
0.765588
0
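Most negatives in the getColor row expose the colour as a stored attribute, often through `@property`; a minimal example of that getter style, for contrast with the ctypes out-parameter approach of the positive document:

```python
class Shape:
    def __init__(self, color: str):
        self._color = color

    @property
    def color(self) -> str:
        return self._color

assert Shape("red").color == "red"
```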
Set the SDGraphObjectFrame color
def setColor(self, value): _res = self.mAPIContext.SDGraphObjectFrame_setColor(self.mHandle, ctypes.byref(value)) if _res != SDApiError.NoError.value: if _res == SDApiError.NoErrorOutputParamNotSet.value: return None raise APIException(SDApiError(_res)) return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_color(self, color):\n\t\tpass", "def set_color(self, color):\n pass", "def set_color(self, new_color):\n self.color = new_color", "def colorFrame(self, _color):\n\t\tif self.frame:\n\t\t\tfor nr, i in enumerate(self.frame):\n\t\t\t\tself.frame[nr][1] = _color", "def colorFrame(self, _color):\n\t\tif self.frame:\n\t\t\tfor nr, i in enumerate(self.frame):\n\t\t\t\tself.frame[nr][1] = _color", "def set_color(self, color):\n self.color = color", "def fl_set_object_color(ptr_flobject, fgcolr, bgcolr):\n _fl_set_object_color = library.cfuncproto(\n library.load_so_libforms(), \"fl_set_object_color\", \\\n None, [cty.POINTER(xfdata.FL_OBJECT), xfdata.FL_COLOR,\n xfdata.FL_COLOR],\n \"\"\"void fl_set_object_color(FL_OBJECT * ob, FL_COLOR col1,\n FL_COLOR col2) \"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n #library.checknonfatal_allowed_value_in_list(fgcolr, xfdata.COLOR_list)\n #library.checknonfatal_allowed_value_in_list(bgcolr, xfdata.COLOR_list)\n ul_fgcolr = library.convert_to_FL_COLOR(fgcolr)\n ul_bgcolr = library.convert_to_FL_COLOR(bgcolr)\n library.keep_elem_refs(ptr_flobject, fgcolr, ul_fgcolr, \\\n bgcolr, ul_bgcolr)\n _fl_set_object_color(ptr_flobject, ul_fgcolr, ul_bgcolr)", "def update_color(self):\r\n \r\n \r\n colorset = self.colorset\r\n \r\n self.grfx[0].colorset = colorset\r\n pass", "def _set_color(self, r):\n c = COLORS[self.color]\n r.setLineColor(c[0], c[1], c[2])\n r.setColor(c[0], c[1], c[2])", "def setColor(self, color):\n self.__color = color", "def _updateColor(self, color):\n primitive = self._getScenePrimitive()\n if len(primitive.children) != 0:\n primitive.children[0].setAttribute('color', color)", "def update_color(self):\n self.plot(update_traces=False, update_waveforms=True)", "def set_color(self, color):\n self._color = color", "def _update_color(self, color):\n self.color = color", "def color(self, color):\n\n self.container['color'] = color", "def change_color(self, color):\n self.color = color", "def set_color(self, color: str):\n self.color = color", "def setColor(self,value):\n\t\tself.politics = value if(type(value) is int)else int(value[1:],16)\n\t\tself.canvas.itemconfig('node_'+self.identifier,fill=self.toRGB())", "def setColor(self, color):\n self.point_color = color\n self.side_color = color\n self.area_color = color", "def setFillColor(self, color):\n fillColor = color\n repaint()", "def set_color(obj, used_colors):\n color = Helper.get_random_color()\n # Check that the current color is not already used\n while color in used_colors:\n color = Helper.get_random_color()\n used_colors.append(color)\n # Apply the chosen color to the object\n obj[c4d.ID_BASEOBJECT_USECOLOR] = c4d.ID_BASEOBJECT_USECOLOR_ALWAYS\n obj[c4d.ID_BASEOBJECT_COLOR] = color", "def setColour(self, col):\n\t\tself.colour = col", "def set_color(self):\n new_color = QColorDialog.getColor(QColor(self.config['color']))\n if not new_color.isValid():\n return\n self.config['color'] = new_color.rgb()\n self.paint()", "def set_color(self, color):\n # type: (Color) -> None\n\n self.color = color", "def SetColor(self, rgbtuple):\n if not rgbtuple:\n rgbtuple = wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNFACE).Get()\n col = [c/255.0 for c in rgbtuple]\n self.figure.set_facecolor(col)\n self.figure.set_edgecolor(col)\n self.canvas.SetBackgroundColour(wx.Colour(*rgbtuple))", "def set_trace_color(color): #py:set_trace_color\n RUR._set_trace_color_(color)", "def set_trace_color(self, color): #py:UR.set_trace_color\n 
RUR._UR.set_trace_color_(self.body, color)", "def setColor(pnj, color):\r\n\r\n assert isinstance(color, (int, tuple, str))\r\n pnj[\"color\"] = color", "def set_color(self, color):\n with doc_ctrl.open_command():\n doc_ctrl.set_color(self.lbl, color)\n std_events.document_modified.emit()", "def setColorIndex(idx):\n dislin.setclr(idx)" ]
[ "0.70018196", "0.6966884", "0.68272316", "0.6800456", "0.6800456", "0.6735271", "0.6730594", "0.67269", "0.6688919", "0.6627748", "0.6626664", "0.6562067", "0.65328383", "0.64631224", "0.6437765", "0.6426548", "0.6391508", "0.6391476", "0.6377652", "0.62837225", "0.6264317", "0.6260212", "0.62546784", "0.6251476", "0.6247712", "0.62349015", "0.6224834", "0.6217905", "0.6183964", "0.6169776" ]
0.785959
0
Get the SDGraphObjectFrame size
def getSize(self): outSize = float2() _res = self.mAPIContext.SDGraphObjectFrame_getSize(self.mHandle, ctypes.byref(outSize)) if _res != SDApiError.NoError.value: if _res == SDApiError.NoErrorOutputParamNotSet.value: return None raise APIException(SDApiError(_res)) return outSize
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSize(self):\n\n return self.size", "def getSize(self):\n return GDimension(frameWidth, frameHeight)", "def getSize(self):\r\n return self.size", "def getFrameSize(self):\n \n return self.frame_size", "def getSize(self):\n return self.size", "def fl_get_object_size(ptr_flobject):\n _fl_get_object_size = library.cfuncproto(\n library.load_so_libforms(), \"fl_get_object_size\", \\\n None, [cty.POINTER(xfdata.FL_OBJECT), cty.POINTER(xfdata.FL_Coord),\n cty.POINTER(xfdata.FL_Coord)], \\\n \"\"\"void fl_get_object_size(FL_OBJECT * obj, FL_Coord * w,\n FL_Coord * h)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n i_width, ptr_width = library.make_FL_Coord_and_pointer()\n i_height, ptr_height = library.make_FL_Coord_and_pointer()\n library.keep_elem_refs(ptr_flobject, i_width, i_height, ptr_width, \\\n ptr_height)\n _fl_get_object_size(ptr_flobject, ptr_width, ptr_height)\n return i_width.value, i_height.value", "def size(self):\n return self.getattr('size')", "def get_frame_size(self):\n return self._frames.shape[-1]", "def Framesize(self):\n\t\treturn self._get_attribute('framesize')", "def size(self):\n if self.frames is None:\n return 0\n return self.frames.size", "def setSize(self, value):\n _res = self.mAPIContext.SDGraphObjectFrame_setSize(self.mHandle, ctypes.byref(value))\n if _res != SDApiError.NoError.value:\n if _res == SDApiError.NoErrorOutputParamNotSet.value:\n return None\n raise APIException(SDApiError(_res))\n return None", "def size(self):\r\n return self.size.data", "def get_size(self):\n return self._surf.get_size()", "def getSize(self):\n return self.__size", "def frame_size(self):\n return self._frame_size", "def get_size(self):\n return self.__size", "def get_size(self):\n return self.size", "def get_size(self):\n return self.size", "def get_size(self):\n return self.size", "def size(self):\r\n return self._size", "def __get_size(self):\n return self.__size", "def getSize(self):\n return self._size", "def get_size(self):\r\n return self._size", "def get_size(self):\r\n return self.__size", "def size(self):\n return self.properties.get('size')", "def size(self):\r\n return self.info().size", "def get_size(self):\r\n\r\n return self._size", "def get_size(self):", "def size(self):\n return self._size", "def size(self):\n if hasattr(self, \"_size\"):\n return self._size\n else:\n return None" ]
[ "0.7424616", "0.74093646", "0.7397733", "0.73387116", "0.7282922", "0.7269103", "0.7247958", "0.7237671", "0.72307", "0.72290355", "0.7214162", "0.72126585", "0.72106576", "0.71800494", "0.7156522", "0.7156451", "0.715393", "0.715393", "0.715393", "0.7151215", "0.71424764", "0.7131172", "0.71139723", "0.71075857", "0.70887023", "0.70803046", "0.70656776", "0.7058241", "0.70540434", "0.7043831" ]
0.82977587
0
Set the SDGraphObjectFrame size
def setSize(self, value):\n    _res = self.mAPIContext.SDGraphObjectFrame_setSize(self.mHandle, ctypes.byref(value))\n    if _res != SDApiError.NoError.value:\n        if _res == SDApiError.NoErrorOutputParamNotSet.value:\n            return None\n        raise APIException(SDApiError(_res))\n    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setFrameSize(self, frame_size):\n \n self.frame_size = frame_size", "def set_size(self, size):\n self.dtSize = size", "def set_size(self, size):\n \n self.width = size[0]\n self.height = size[1]", "def setDescriptorSize(self, dsize): # real signature unknown; restored from __doc__\n pass", "def fl_set_object_size(ptr_flobject, width, height):\n _fl_set_object_size = library.cfuncproto(\n library.load_so_libforms(), \"fl_set_object_size\", \\\n None, [cty.POINTER(xfdata.FL_OBJECT), xfdata.FL_Coord,\n xfdata.FL_Coord], \\\n \"\"\"void fl_set_object_size(FL_OBJECT * obj, FL_Coord w,\n FL_Coord h)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n i_width = library.convert_to_FL_Coord(width)\n i_height = library.convert_to_FL_Coord(height)\n library.keep_elem_refs(ptr_flobject, width, i_width, height, i_height)\n _fl_set_object_size(ptr_flobject, i_width, i_height)", "def size(self, size):\n self.width = size\n self.height = size", "def size(self, size):\n self.width = size\n self.height = size", "def setsize(self, size):\n self.__size = size", "def set_frame_size(*args):\n return _ida_frame.set_frame_size(*args)", "def set_size(self, w, h):\n\t\tpass", "def setSize_0(self, size):\n self.setSize(size.getWidth(), size.getHeight())", "def set_frame_size(self, frame_size_selector):\n raise NotImplementedError", "def set_pixel_size(self, pixel_size):\n raise NotImplementedError", "def setSize(self, width, height):\n frameWidth = width\n frameHeight = height\n repaint()", "def onSize(self, event): \n\t\tw, h = self.GetClientSizeTuple()\n\t\tself.tree.SetDimensions(0, 0, w, h)", "def size(self, size):\n self._size = size", "def size(self, value):\n self.width = value\n self.height = value", "def _set_size(self):\n if self.width_key is not None:\n width = config.get(self.width_key)\n height = config.get(self.height_key)\n self.window.resize(width, height)", "def size(self, size):\n\n self._size = size", "def size(self, size):\n\n self._size = size", "def size(self, size):\n\n self._size = size", "def size(self, size):\n\n self._size = size", "def set_size(self, size=None):\n if not size:\n size = self.output_size\n self.img = cv2.resize(self.img, size)\n self.update_image()\n self.update_size()", "def setPointSize(self, size):\n for point in self.points:\n point.size = size", "def update_size(self):\n self.size = self.image.size\n self.width, self.height = self.size", "def changeSize(self, value):\n self.layer.brush_size = value", "def fl_set_object_lsize(ptr_flobject, size):\n _fl_set_object_lsize = library.cfuncproto(\n library.load_so_libforms(), \"fl_set_object_lsize\", \\\n None, [cty.POINTER(xfdata.FL_OBJECT), cty.c_int], \\\n \"\"\"void fl_set_object_lsize(FL_OBJECT * ob, int lsize) \"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n i_size = library.convert_to_intc(size)\n library.keep_elem_refs(ptr_flobject, size, i_size)\n _fl_set_object_lsize(ptr_flobject, i_size)", "def size(self, val):\n self.width = val\n self.height = val", "def set_pointsize(self, pointsize):\n\tself.m_pointsize = pointsize", "def size(self, value):\n self.width = value" ]
[ "0.7229462", "0.71947026", "0.7075671", "0.70734507", "0.7000243", "0.6931248", "0.6931248", "0.6877675", "0.6803384", "0.6802796", "0.6758983", "0.67472947", "0.656778", "0.6560254", "0.6538937", "0.64847857", "0.64384997", "0.6386987", "0.63508207", "0.63508207", "0.63508207", "0.63508207", "0.6334763", "0.6327431", "0.63260144", "0.63203996", "0.6313298", "0.6312339", "0.62740713", "0.6265208" ]
0.7277744
0
Test working ssdp flow.
async def test_flow_ssdp(hass):\n    result = await hass.config_entries.flow.async_init(\n        DOMAIN,\n        context={"source": SOURCE_SSDP},\n        data=SSDP_DATA,\n    )\n    assert result["type"] == "form"\n    assert result["step_id"] == "init"\n    assert result["description_placeholders"] == {\n        CONF_NAME: FRIENDLY_NAME,\n        CONF_HOST: HOST,\n    }\n    flow = _flow_next(hass, result["flow_id"])\n    assert flow["context"]["unique_id"] == UDN\n    result = await hass.config_entries.flow.async_configure(\n        result["flow_id"], user_input={}\n    )\n    assert result["type"] == RESULT_TYPE_CREATE_ENTRY\n    assert result["title"] == FRIENDLY_NAME\n    assert result["data"] == CONF_DATA
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def test_flow_ssdp_discovery(opp, aioclient_mock):\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN,\n data={\n ATTR_SSDP_LOCATION: \"http://1.2.3.4:80/\",\n ATTR_UPNP_MANUFACTURER_URL: DECONZ_MANUFACTURERURL,\n ATTR_UPNP_SERIAL: BRIDGEID,\n },\n context={\"source\": SOURCE_SSDP},\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"link\"\n\n aioclient_mock.post(\n \"http://1.2.3.4:80/api\",\n json=[{\"success\": {\"username\": API_KEY}}],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result[\"title\"] == BRIDGEID\n assert result[\"data\"] == {\n CONF_HOST: \"1.2.3.4\",\n CONF_PORT: 80,\n CONF_API_KEY: API_KEY,\n }", "async def test_ssdp_discovery(hass: HomeAssistantType, requests_mock: Mocker) -> None:\n mock_connection(requests_mock)\n\n discovery_info = {\n ATTR_SSDP_LOCATION: SSDP_LOCATION,\n ATTR_UPNP_FRIENDLY_NAME: UPNP_FRIENDLY_NAME,\n ATTR_UPNP_SERIAL: UPNP_SERIAL,\n }\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={CONF_SOURCE: SOURCE_SSDP}, data=discovery_info\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"ssdp_confirm\"\n assert result[\"description_placeholders\"] == {CONF_NAME: UPNP_FRIENDLY_NAME}\n\n with patch(\n \"homeassistant.components.roku.async_setup\", return_value=True\n ) as mock_setup, patch(\n \"homeassistant.components.roku.async_setup_entry\", return_value=True,\n ) as mock_setup_entry:\n result = await hass.config_entries.flow.async_configure(\n flow_id=result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result[\"title\"] == UPNP_FRIENDLY_NAME\n\n assert result[\"data\"]\n assert result[\"data\"][CONF_HOST] == HOST\n assert result[\"data\"][CONF_NAME] == UPNP_FRIENDLY_NAME\n\n await hass.async_block_till_done()\n assert len(mock_setup.mock_calls) == 1\n assert len(mock_setup_entry.mock_calls) == 1", "def test_established_tcp_session_after_re_attachinging_sg(self):\n\n ssh_sg = self._create_security_group('ssh_sg')\n self.create_loginable_secgroup_rule(secgroup_id=ssh_sg['id'])\n vm_ssh, fips, vms = self.create_vm_testing_sec_grp(\n security_groups=[{'name': ssh_sg['name']}])\n sg = self._create_security_group('sg')\n nc_rule = [{'protocol': constants.PROTO_NUM_TCP,\n 'direction': constants.INGRESS_DIRECTION,\n 'port_range_min': 6666,\n 'port_range_max': 6666}]\n self.create_secgroup_rules(nc_rule, secgroup_id=sg['id'])\n srv_port = self.client.list_ports(network_id=self.network['id'],\n device_id=vms[1]['server']['id'])['ports'][0]\n srv_ip = srv_port['fixed_ips'][0]['ip_address']\n with utils.StatefulConnection(\n vm_ssh[0], vm_ssh[1], srv_ip, 6666) as con:\n self.client.update_port(srv_port['id'],\n security_groups=[ssh_sg['id'], sg['id']])\n con.test_connection()\n with utils.StatefulConnection(\n vm_ssh[0], vm_ssh[1], srv_ip, 6666) as con:\n self.client.update_port(\n srv_port['id'], security_groups=[ssh_sg['id']])\n con.test_connection(should_pass=False)\n with utils.StatefulConnection(\n vm_ssh[0], vm_ssh[1], srv_ip, 6666) as con:\n self.client.update_port(srv_port['id'],\n security_groups=[ssh_sg['id'], sg['id']])\n con.test_connection()\n self.client.update_port(srv_port['id'],\n security_groups=[ssh_sg['id']])\n con.test_connection(should_pass=False)\n self.client.update_port(srv_port['id'],\n 
security_groups=[ssh_sg['id'], sg['id']])\n con.test_connection()", "async def test_ssdp_bravia(hass):\n ssdp_data = copy.deepcopy(SSDP_DATA)\n ssdp_data[\"X_ScalarWebAPI_DeviceInfo\"][\"X_ScalarWebAPI_ServiceList\"][\n \"X_ScalarWebAPI_ServiceType\"\n ].append(\"videoScreen\")\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_SSDP}, data=ssdp_data,\n )\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"not_songpal_device\"", "async def test_flow_ssdp_bad_discovery(opp, aioclient_mock):\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN,\n data={ATTR_UPNP_MANUFACTURER_URL: \"other\"},\n context={\"source\": SOURCE_SSDP},\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"not_deconz_bridge\"", "def test_continuous_traffic(self,setup_suite):\n logger.info('Test Traffic in continuous Mode')\n kwargs = { 'vs_names':['vs-1', 'vs-2']}\n traffic_obj = traffic_start(**kwargs)\n logger.info('Waiting for 10 sec while traffic is flowing ...')\n time.sleep(10)\n traffic_expect_no_errors(traffic_obj, vs_names=['vs-1', 'vs-2'])\n traffic_get_stats(traffic_obj)\n traffic_stop()", "def test():\n suite = unittest.TestLoader().loadTestsFromTestCase(TestTwistedSFTPServer)\n runtime = unittest.TextTestRunner(verbosity=2).run(suite)\n return runtime.wasSuccessful()", "def doTest(self, module, payloads):\n for payload in payloads:\n # Perform test & write report\n str = \"TEST #%s - %s\" % (self.testnum, payload[0])\n print str[:62].ljust(65,'.'),\n #test_dt_start = datetime.datetime.now()\n test_dt_start = time.strftime('%Y-%m-%d %H:%M:%S')\n pattern = \"\"\n\n if payload[1] == \"socket\":\n cmd = self.commandParser('socket', payload[4])\n (test_port, test_proto) = (payload[2], payload[3].lower())\n test_payload = cmd\n if payload[3].lower() == 'tcp':\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n else:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((self._target,payload[2]))\n s.send(cmd)\n pattern = payload[5]\n s.close()\n elif payload[1] == \"command\":\n cmd = self.commandParser('command', payload[2])\n (test_port, test_proto) = (None, None)\n test_payload = ' '.join(cmd)\n if self._debug==1:\n print \"\\n\\n***Debug: sending command: %s\" % ' '.join(cmd)\n subprocess.call(cmd)\n else:\n subprocess.call(cmd, stdout=subprocess.PIPE)\n pattern = payload[3]\n elif payload[1] == \"scapy\":\n cmd = self.commandParser('scapy', payload[2])\n if self._debug == 1:\n print \"\\n\\n***Debug: sending scapy payload: %s\" % cmd\n cmd = cmd.replace('verbose=0', 'verbose=1')\n (test_port, test_proto) = (None, None)\n test_payload = cmd\n eval(cmd)\n pattern = payload[3]\n elif payload[1] == \"pcap\":\n pcap = os.path.join(self.config.get('PATHS', 'pcapdir'), payload[2])\n (test_port, test_proto) = (None, None)\n test_payload = pcap\n if self._debug == 1:\n # verbose mode\n print \"Pcap Replay file\"\n cmd = [self.config.get('ENV','sudo'), self.config.get('ENV','tcpreplay'), '-i', self.config.get('CLIENT','iface'), pcap]\n else:\n # quiet mode\n cmd = [self.config.get('ENV','sudo'), self.config.get('ENV','tcpreplay'), '-q', '-i', self.config.get('CLIENT','iface'), pcap]\n if self._debug==1:\n subprocess.call(cmd)\n else:\n subprocess.call(cmd, stdout=subprocess.PIPE)\n pattern = payload[3]\n\n test_dt_end = time.strftime('%Y-%m-%d %H:%M:%S')\n\n # Sleep before getting alerts\n time.sleep(int(self.config.get('TIMING', 'sleepbeforegetalerts')))\n\n # Get new alerts and 
calculate new offset\n self.getAlertsFile()\n res = self.getAlertsFromOffset(self.config.get('PATHS', 'tempfile'), self.offset)\n\n # Sig matching\n if pattern != \"\":\n if re.search(pattern, res):\n test_flag = 2\n else:\n if res == '':\n test_flag = 0\n else:\n test_flag = 1\n test_sig_match = pattern\n else:\n test_sig_match = None\n test_flag = None\n\n test_alert = res\n self.offset = self.getOffset(self.config.get('PATHS', 'tempfile'))\n\n database.DB(self._cnf).addTestResult((module, payload[1], test_dt_start,\n test_dt_end, payload[0], test_port, test_proto, test_payload,\n test_sig_match, res, test_flag))\n\n print \"[ done ]\"\n \n # Sleep before next test\n time.sleep(int(self.config.get('TIMING', 'sleepbeforenexttest')))\n self.testnum += 1", "def test_fsdp_tp_checkpoint_integration(self):\n tensor_parallel_size = 2\n torch.manual_seed(0)\n model = SimpleModel().cuda(self.rank)\n tp_pg, fsdp_pg = self._get_sub_pgs(tensor_parallel_size)\n # Shard with TP and then wrap with FSDP\n sharding_specs = self._get_chunk_sharding_spec(tp_pg.size(), tp_pg)\n sharding_plan = SimpleModel.module_sharding_plan(sharding_specs)\n shard_module(model, sharding_plan, process_group=tp_pg)\n tp_fsdp_model = FSDP(model, process_group=fsdp_pg)\n\n # Check that we produce a nested ST from model state dict\n with FSDP.state_dict_type(tp_fsdp_model, StateDictType.SHARDED_STATE_DICT):\n state_dict = tp_fsdp_model.state_dict()\n # TODO once 2D is out, validate the nesting\n self.assertTrue(_is_nested_tensor(state_dict[\"net1.weight\"]))\n self.assertFalse(_is_nested_tensor(state_dict[\"net1.bias\"]))\n tp_fsdp_model.load_state_dict(state_dict)\n\n tp_fsdp_optim = torch.optim.Adam(tp_fsdp_model.parameters(), lr=0.0001)\n\n input_seed = self.rank\n torch.manual_seed(input_seed + 1)\n inp_size = [2, 3, 5]\n inp = torch.rand(*inp_size).cuda(self.rank)\n\n tp_fsdp_model(inp).sum().backward()\n tp_fsdp_optim.step()\n\n # Check that we produce a nested ST from optim state dict\n optim_state = FSDP.sharded_optim_state_dict(tp_fsdp_model, tp_fsdp_optim)\n # TODO once 2D is out, validate the nesting\n self.assertTrue(\n _is_nested_tensor(optim_state[\"state\"][\"net1.weight\"][\"exp_avg\"])\n )\n self.assertFalse(\n _is_nested_tensor(optim_state[\"state\"][\"net1.bias\"][\"exp_avg\"])\n )", "def test_run(self) -> None:\n appConf = loadAppConfig()\n smsApi = SmsApi(appConf[\"smsUsername\"], appConf[\"smsPassword\"],\n appConf[\"persons\"], appConf[\"groups\"])\n resp: bool = smsApi.sendSmsToGroup(\n 'it', '[Alerting] CCTV uptime notification')\n self.assertTrue(resp == True)", "def test_connection(self):\n self._bind_to_service()", "def cb_test( self, ):\r\n # this shows how to run stuff in the helper -- call thru queue, post to queue\r\n self.post_to_queue( \"call\", self.helper_thread.test_test_ports , ( ) )", "def test_send(self):\n # Required to get useful test names\n super(TestCisPlyOutput_local, self).test_send()", "def test_sai_from_ptf(sai_testbed, duthost, ptfhost, test_case, request):\n logger.info(\"Checking test environment before running test.\")\n dut_ip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host']\n start_sai_test_conatiner_with_retry(duthost, get_sai_test_container_name(request))\n try:\n logger.info(\"Running test: {0}\".format(test_case))\n ptfhost.shell(\"ptf --test-dir {0} {1} {2} --relax --xunit --xunit-dir {3} \\\n -t \\\"server='{4}';port_map_file='{5}'\\\"\"\n .format(\n SAI_TEST_CASE_DIR_ON_PTF, \n test_case, \n TEST_INTERFACE_PARAMS,\n 
SAI_TEST_REPORT_TMP_DIR_ON_PTF, \n dut_ip, \n PORT_MAP_FILE_PATH))\n logger.info(\"Test case [{}] passed.\".format(test_case))\n except BaseException as e: \n stop_and_rm_sai_test_container(duthost, get_sai_test_container_name(request))\n logger.info(\"Test case [{}] failed as {}\".format(test_case, e))\n pytest.fail(\"Test case [{}] failed\".format(test_case), e)\n finally:\n _store_test_result(ptfhost)", "def dvs_remote_sg_simple(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n self.show_step(2)\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network are created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n self.show_step(3)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.show_step(4)\n _sg_rules = os_conn.neutron.list_security_group_rules()\n sg_rules = [sg_rule for sg_rule in _sg_rules['security_group_rules']\n if sg_rule['security_group_id'] in [sg1.id, sg2.id]]\n for rule in sg_rules:\n os_conn.neutron.delete_security_group_rule(rule['id'])\n\n self.show_step(5)\n self.show_step(6)\n for sg in [sg1, sg2]:\n for rule in [self.icmp, self.tcp]:\n rule[\"security_group_rule\"][\"security_group_id\"] = sg.id\n rule[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n rule[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(rule)\n rule[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(rule)\n\n # Create access_point to instances from SG1 and SG2\n _, access_point_ip = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, sg1.name, sg2.name])\n\n self.show_step(7)\n istances_sg1 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg1.name])\n\n self.show_step(8)\n istances_sg2 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg2.name])\n openstack.verify_instance_state(os_conn)\n\n # Get private ips of instances\n ips = {\n 'SG1': [os_conn.get_nova_instance_ip(i, net_name=net_1['name'])\n for i in istances_sg1],\n 'SG2': [os_conn.get_nova_instance_ip(i, net_name=net_1['name'])\n for i in istances_sg2]\n }\n\n self.show_step(9)\n self.show_step(10)\n for group in ips:\n ip_pair = dict.fromkeys(ips[group])\n for key in ip_pair:\n ip_pair[key] = [value for value in ips[group] if key != value]\n openstack.check_connection_through_host(\n access_point_ip, ip_pair, timeout=60 * 5)\n\n 
self.show_step(11)\n ip_pair = dict.fromkeys(ips['SG1'])\n for key in ip_pair:\n ip_pair[key] = ips['SG2']\n openstack.check_connection_through_host(\n access_point_ip, ip_pair, result_of_command=1, timeout=60 * 5)", "def test_pfc_pause_lossy_traffic(api, \n duthost, \n lossy_configs, \n start_delay, \n traffic_duration) :\n duthost.shell('sudo pfcwd stop')\n\n for base_config in lossy_configs:\n\n # create the configuration\n api.set_config(base_config)\n\n # start all flows\n api.set_flow_transmit(FlowTransmit(state='start'))\n\n exp_dur = start_delay + traffic_duration\n logger.info(\"Traffic is running for %s seconds\" %(exp_dur))\n time.sleep(exp_dur)\n\n # stop all flows\n api.set_flow_transmit(FlowTransmit(state='stop'))\n\n # Get statistics\n test_stat = api.get_flow_results(FlowRequest())\n\n for rows in test_stat['rows'] :\n tx_frame_index = test_stat['columns'].index('frames_tx')\n rx_frame_index = test_stat['columns'].index('frames_rx')\n caption_index = test_stat['columns'].index('name') \n if ((rows[caption_index] == 'Test Data') or\n (rows[caption_index] == 'Background Data')):\n if rows[tx_frame_index] != rows[rx_frame_index] :\n pytest_assert(False,\n \"Not all %s reached Rx End\" %(rows[caption_index]))", "async def test_sddp_exist(hass):\n _create_mock_config_entry(hass)\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_SSDP}, data=SSDP_DATA,\n )\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"", "def test_perform_host_action(self):\n pass", "def test_verify_connection_to_a_device():", "def test_udp_alt_iteration():\n cmd = [\"python\", \"dnsck/dnsck.py\", \"-s\", \"8.8.8.8\", \"google.com\", \"-i\", \"1\"]\n process = subprocess.run(cmd, shell=False, check=True)\n assert process.returncode == 0", "def test_send_network(self) :\n symbol = 'A' \n oProtocol = Protocol(symbol,mode=\"client\",debug=self.debug)\n command = \"N200\"\n message = oProtocol.send(command)\n #if message['status'] is False :\n #print(\"\\n*** ERROR : test_send_network : {}\".format(message['notify']))\n\n #Pour enregistrer les traces d'appels de fonctions dans le fichier log/client_calltrack_sorted.txt\n client_tracker_print()\n self.assertTrue( (message['status'] is not True) )", "def test_poll(self):\n fake_controller_setup(\n self.enforcement_controller,\n self.enforcement_stats_controller,\n )\n imsi = 'IMSI001010000000013'\n sub_ip = '192.168.128.74'\n\n flow_list = [\n FlowDescription(\n match=FlowMatch(\n ip_dst=convert_ipv4_str_to_ip_proto('45.10.0.0/25'),\n direction=FlowMatch.UPLINK,\n ),\n action=FlowDescription.PERMIT,\n ),\n ]\n policy = VersionedPolicy(\n rule=PolicyRule(id='rule1', priority=3, flow_list=flow_list),\n version=1,\n )\n self.service_manager.session_rule_version_mapper.save_version(\n imsi, convert_ipv4_str_to_ip_proto(sub_ip), 'rule1', 1,\n )\n\n \"\"\" Setup subscriber, setup table_isolation to fwd pkts \"\"\"\n sub_context = RyuDirectSubscriberContext(\n imsi, sub_ip, self.enforcement_controller,\n self._main_tbl_num, self.enforcement_stats_controller,\n ).add_policy(policy)\n\n snapshot_verifier = SnapshotVerifier(\n self, self.BRIDGE,\n self.service_manager,\n )\n with sub_context, snapshot_verifier:\n rule_map = self.enforcement_stats_controller.get_stats()\n if (rule_map.records[0].rule_id == self.DEFAULT_DROP_FLOW_NAME):\n rule_record = rule_map.records[1]\n else:\n rule_record = rule_map.records[0]\n self.assertEqual(rule_record.sid, imsi)\n 
self.assertEqual(rule_record.rule_id, \"rule1\")\n self.assertEqual(rule_record.bytes_tx, 0)\n self.assertEqual(rule_record.bytes_rx, 0)\n rule_map_cookie = self.enforcement_stats_controller.get_stats(1, 0)\n if (rule_map_cookie.records[0].rule_id == self.DEFAULT_DROP_FLOW_NAME):\n rule_record_cookie = rule_map_cookie.records[1]\n else:\n rule_record_cookie = rule_map_cookie.records[0]\n self.assertEqual(rule_record_cookie.sid, imsi)\n self.assertEqual(rule_record_cookie.rule_id, \"rule1\")\n self.assertEqual(rule_record_cookie.bytes_tx, 0)\n self.assertEqual(rule_record_cookie.bytes_rx, 0)", "def doClientSideAttacksTest(self, payloads):\n if self._mode==\"standalone\":\n # Check whether reverseshell is running on remote server (port 12345/tcp)\n # Open socket (it will be closed at the end of the tests)\n # on port 12345/tcp\n print \"Checking if reverse shell is running on remote host\".ljust(65,'.'),\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((self._target, int(self.config.get('SERVER', 'reverseshellport'))))\n except:\n print \"[ Failed ]\"\n print \"\\n***ERROR: Please setup reverse shell on remote server first or use --mode=gateway!\"\n sys.exit(0)\n print \"[ OK ]\"\n\n for payload in payloads:\n # Perform test & write report\n str = \"TEST #%s - %s\" % (self.testnum, payload[0])\n print str[:62].ljust(65,'.'),\n test_dt_start = time.strftime('%Y-%m-%d %H:%M:%S')\n\n if self._mode==\"standalone\":\n # Send cmd to execute on server side (wget file)\n s.send(\"wget %s\" % os.path.join(self.config.get('PATHS', 'urlpdf'), payload[0]))\n # Issue 3450032 - Synchronisation issue. The server has to instruct\n # the client that the file has been successfully downloaded before it\n # goes to next file\n s.recv(1024)\n else:\n self.downloadFile(self.config.get('PATHS', 'urlpdf'), payload[0])\n\n # Sleep before getting alerts\n time.sleep(int(self.config.get('TIMING', 'sleepbeforegetalerts')))\n\n # Get new alerts and calculate new offset\n self.getAlertsFile()\n res = self.getAlertsFromOffset(self.config.get('PATHS', 'tempfile'), self.offset)\n\n # Sig matching\n pattern = payload[1]\n if pattern != \"\":\n if re.search(pattern, res):\n test_flag = 2\n else:\n if res == '':\n test_flag = 0\n else:\n test_flag = 1\n test_sig_match = pattern\n else:\n test_sig_match = None\n test_flag = None\n\n self.offset = self.getOffset(self.config.get('PATHS', 'tempfile'))\n\n test_dt_end = time.strftime('%Y-%m-%d %H:%M:%S')\n database.DB(self._cnf).addTestResult(('clientSideAttacks', 'wget', test_dt_start,\n test_dt_end, payload[0], None, None, None,\n test_sig_match, res, test_flag))\n\n print \"[ done ]\"\n self.testnum += 1\n\n if self._mode==\"standalone\":\n # Close socket\n s.close()", "def test_scp_callback_return_valid(self):\n self.scp = DummyVerificationSCP()\n self.scp.status = 0x0000\n self.scp.start()\n\n ae = AE()\n ae.add_requested_context(VerificationSOPClass)\n assoc = ae.associate('localhost', 11112)\n assert assoc.is_established\n rsp = assoc.send_c_echo()\n assert rsp.Status == 0x0000\n assoc.release()\n self.scp.stop()", "def test_connection(self):\n\n self.speed_test()\n if self.runs >= self.min_runs:\n self.lg.debug('Minimum number of speed tests performed.')\n self.check_performance()\n if self.bad_performance:\n self.lg.debug('Performance is below tolerance level.')\n self.notify_ISP()\n self.results_up.pop(0)\n self.results_down.pop(0)\n self.results_timestamp.pop(0)\n self.runs += 1", "def runtest(self):", "def 
test_otoroshi_controllers_adminapi_templates_controller_initiate_tcp_service_tcp(self):\n pass", "def _test_once(self, proto, direction):\n # connect and scan to get RSSI\n dut_ip, rssi = self.setup()\n\n assert direction in ['rx', 'tx']\n assert proto in ['tcp', 'udp']\n\n # run iperf test\n if direction == 'tx':\n if proto == 'tcp':\n self.softap_dut.write('iperf -s -i 1 -t {}'.format(TEST_TIME))\n # wait until DUT TCP server created\n try:\n self.softap_dut.expect('iperf tcp server create successfully', timeout=1)\n except DUT.ExpectTimeout:\n # compatible with old iperf example binary\n pass\n self.dut.write('iperf -c {} -i 1 -t {}'.format(self.softap_ip, TEST_TIME))\n else:\n self.softap_dut.write('iperf -s -u -i 1 -t {}'.format(TEST_TIME))\n self.dut.write('iperf -c {} -u -i 1 -t {}'.format(self.softap_ip, TEST_TIME))\n else:\n if proto == 'tcp':\n self.dut.write('iperf -s -i 1 -t {}'.format(TEST_TIME))\n # wait until DUT TCP server created\n try:\n self.dut.expect('iperf tcp server create successfully', timeout=1)\n except DUT.ExpectTimeout:\n # compatible with old iperf example binary\n pass\n self.softap_dut.write('iperf -c {} -i 1 -t {}'.format(dut_ip, TEST_TIME))\n else:\n self.dut.write('iperf -s -u -i 1 -t {}'.format(TEST_TIME))\n self.softap_dut.write('iperf -c {} -u -i 1 -t {}'.format(dut_ip, TEST_TIME))\n time.sleep(60)\n\n if direction == 'tx':\n server_raw_data = self.dut.read()\n else:\n server_raw_data = self.softap_dut.read()\n self.dut.write('iperf -a')\n self.softap_dut.write('iperf -a')\n self.dut.write('heap')\n heap_size = self.dut.expect(re.compile(r'min heap size: (\\d+)\\D'))[0]\n\n # return server raw data (for parsing test results) and RSSI\n return server_raw_data, rssi, heap_size", "def test_subscriber_access_for_two_vsg_services(self):", "def test_ISUPPORT(self):\n self._sendISUPPORT()" ]
[ "0.69209915", "0.6228882", "0.6222382", "0.62218183", "0.6128936", "0.6066502", "0.6031052", "0.5905359", "0.58518237", "0.5829707", "0.5796335", "0.5789207", "0.5749608", "0.5732316", "0.5731994", "0.57290465", "0.57289624", "0.57172656", "0.5697027", "0.56848615", "0.56813985", "0.5668835", "0.56577176", "0.5635538", "0.56331116", "0.56317776", "0.5551177", "0.5520992", "0.55196935", "0.5515585" ]
0.7182019
0
Compile a model of URIs and CURIEs and then test the various types
def test_uri_and_curie(self):\n    self.single_file_generator('py', PythonGenerator, filtr=metadata_filter,\n                               comparator=lambda exp, act: compare_python(exp, act, self.env.expected_path('foo.py')))\n\n    # Check that the interpretations are correct\n    self.single_file_generator('jsonld', ContextGenerator, filtr=ldcontext_metadata_filter,\n                               comparator=lambda expected, actual: compare_rdf(expected, actual, fmt="json-ld"))\n    self.single_file_generator('json', JSONLDGenerator, filtr=json_metadata_filter)\n\n    module = compile_python(env.expected_path(self.model_name + '.py'))\n    curie_obj = module.C1("ex:obj1", hasCurie="ex:curie", hasURI="http://example.org/test/uri",\n                          hasNcName="A123", id2="ex:id2")\n    instance_jsonld = loads('{ "ex": "http://example.org/test/inst#" }')\n\n    g = as_rdf(curie_obj, [env.input_path(self.model_name + '.jsonld'), instance_jsonld])\n    env.eval_single_file(env.expected_path('uriandcurie.ttl'), g.serialize(format='ttl').decode(),\n                         lambda s: s, compare_rdf)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_resnet18():\n model = RestNet18()\n assert type(model) == RestNet18", "def __init__(self, model_uri: str = None, method: str = \"predict\", modelUri: str = None, type: str = None):\n super().__init__()\n print(model_uri, modelUri, type)\n self.model_uri = model_uri\n self.method = method\n self.ready = False\n self.load()", "def build_model():", "def init_model(model_type):\n if model_type == 'magnitude':\n model = Magnitude('../model/crawl-300d-2M.magnitude')\n elif model_type == 'gensim':\n model = KeyedVectors.load('../model/pre_trained_word2vec_embeddings.bin')\n else:\n print(\"Invalid model type.\")\n sys.exit(1)\n return model, model_type", "def test_model_uris(self):\n for filename in [\n LOCAL_RDF_FILE_NAME,\n LOCAL_SHEXJ_FILE_NAME,\n LOCAL_SHEXC_FILE_NAME,\n LOCAL_TYPES_LDCONTEXT_FILE,\n LOCAL_SHEXC_FILE_NAME,\n LOCAL_SHEXJ_FILE_NAME,\n LOCAL_RDF_FILE_NAME,\n LOCAL_TYPES_LDCONTEXT_FILE,\n LOCAL_MAPPINGS_YAML_FILE,\n LOCAL_MAPPINGS_LDCONTEXT_FILE,\n LOCAL_MAPPINGS_JSONLD_FILE,\n LOCAL_TYPES_LDCONTEXT_FILE,\n LOCAL_TYPES_JSONLD_FILE,\n LOCAL_TYPES_YAML_FILE,\n LOCAL_MAPPINGS_JSONLD_FILE,\n LOCAL_METAMODEL_JSONLD_FILE,\n LOCAL_METAMODEL_LDCONTEXT_FILE,\n ]:\n self.assertTrue(os.path.exists(filename), msg=f\"{filename} does not exist\")\n self.validate_yaml_content(\n METAMODEL_URI,\n METAMODEL_NAME,\n METAMODEL_NAMESPACE_NAME,\n METAMODEL_NAMESPACE,\n LOCAL_METAMODEL_YAML_FILE,\n )\n self.validate_yaml_content(\n METATYPE_URI,\n METATYPE_NAME,\n METATYPE_NAMESPACE_NAME,\n METATYPE_NAMESPACE,\n LOCAL_TYPES_YAML_FILE,\n )\n self.validate_yaml_content(\n METAMAPPING_URI,\n METAMAPPING_NAME,\n METAMAPPING_NAMESPACE_NAME,\n METAMAPPING_NAMESPACE,\n LOCAL_MAPPINGS_YAML_FILE,\n )", "def __init__(self, url, type):\n self.url = url\n self.type = type", "def load(uri: str, type: Optional[str] = None, *args, **kwargs) -> DataObject:\n from . import data # Loads all formats\n from . import core\n\n if type:\n return core.DataObject.registered_types[type].from_uri(uri, *args, **kwargs)\n else:\n return core.DataObject.from_uri(uri, *args, **kwargs)", "def parse(cls, model_path: str, **kwargs):", "def test_model(self):\n url = Urls('https://blog.gds-gov.tech/terragrunt-in-retro-i-would-have-done-these-few-things-e5aaac451942', 'http://172.104.63.163/n4lm9')\n self.assertEqual(url.long,'https://blog.gds-gov.tech/terragrunt-in-retro-i-would-have-done-these-few-things-e5aaac451942')\n self.assertEqual(url.short,'http://172.104.63.163/n4lm9')", "def get_objects_with_cmodel(self, cmodel_uri, type=None):\n uris = self.risearch.get_subjects(modelns.hasModel, cmodel_uri)\n return [self.get_object(uri, type) for uri in uris]", "def model_for_source_url(self, url):\n if 'cities_reduced_fat/city/' in url:\n return City\n elif 'cities_reduced_fat/country/' in url:\n return Country", "def __setupmodelsfromws(self,out,modeltypesIn,modelnamesIn):\n # -- Could be a list\n if type(modeltypesIn) == list or type(modelnamesIn) == list:\n if type(modelnamesIn) != list:\n raise NameError(\"Coherence requires to 'modelnames' keyword\"\\\n \" being a string, as 'modeltypes' is\")\n if type(modeltypesIn) != list:\n raise NameError(\"'Coherence requires to 'modeltypes' keyword\"\\\n \" being a string, as 'modelnames' is\")\n modeltypeslist = modeltypesIn\n modelnameslist = modelnamesIn\n # Check the same len!!! 
FIXME\n else:\n modeltypeslist = [modeltypesIn]\n modelnameslist = [modelnameIn] \n # Setting\n for modeltype,modelname in zip(modeltypeslist,modelnameslist):\n try:\n # Note that the regular constructor puts a ntuple\n # containing the observables, so mimicking that\n self.__models[modeltype] = (out[3][modelname],None)\n except KeyError:\n # Who is not there?\n nothere = None\n if self.__models.has_key(modeltype):\n nothere = modeltype\n else:\n # should be here\n nothere = modelname\n raise AttributeError(\"Not found the model '%s'\" % nothere)\n # Don't have this info, so\n self.__pdftypes[modeltype] = '__UNDEF__'", "def test_convert(self):\n gd: GraphDocument = json_loader.load(str(ONT), target_class=GraphDocument)\n g = self.converter.convert(gd)\n g.serialize(OUT)\n oi = SparqlImplementation(OntologyResource(OUT))\n # for r in oi.relationships([\"GO:0005773\"]):\n # print(r)\n self.compliance_tester.test_synonyms(oi)\n self.compliance_tester.test_definitions(oi)\n self.compliance_tester.test_sssom_mappings(oi)\n self.compliance_tester.test_relationships(oi)", "def load_model(self) -> Any:", "def __init__(self, model: Type[ModelType]):\n self.model = model", "def __init__(self, model: Type[ModelType]):\n self.model = model", "def __init__(self,_type: Optional[str] = None,\r\n pre_trained_model_json: Optional[str] = None,\r\n spacy_nlp: Optional[pd.DataFrame] = None):\r\n\r\n if _type is None:\r\n # empty model\r\n self.model = None\r\n self.keywords = None\r\n elif _type == \"fixed\":\r\n if pre_trained_model_json is None:\r\n raise RatingModel.RatingModel.Error(\"pre_trained_model_json is None\")\r\n self.loadModelFixed(pre_trained_model_json)\r\n elif _type == \"lda\":\r\n if pre_trained_model_json is None:\r\n raise RatingModel.RatingModel.Error(\"pre_trained_model_json is None\")\r\n self.loadModelLDA(pre_trained_model_json)\r\n else:\r\n raise RatingModel.RatingModelError( \"type of test not valid. 
Either 'fixed' or 'lda'\")\r\n\r\n print(\"Loading nlp tools...\")\r\n if spacy_nlp is None:\r\n # load default model\r\n self.nlp = loadDefaultNLP()\r\n else:\r\n self.nlp = spacy_nlp\r\n\r\n print(\"Loading pdf parser...\")\r\n # takes some time\r\n from tika import parser\r\n\r\n self.parser = parser", "def test_valid_model_code_from_valid_coordinate(self, uuid=uuid.uuid4()) -> None:\n expected_model_number = 182\n expected_model_params = {\n \"code\": \"TL2\",\n \"number\": 182,\n \"name\": \"Low broadleaf litter\",\n \"description\": \"Low load broadleaf litter, broadleaf, hardwood litter, spread rate and flame low.\",\n \"fuel_load\": [1.40, 2.30, 2.20, 0.00, 0.00],\n \"type\": \"Static\",\n \"sav_ratio\": [2000.0, 0.0, 0.0],\n \"fuel_bed_depth\": 0.2,\n \"dead_fuel_moisture_of_extinction\": 0.25,\n \"characteristic_sav\": 1806.0,\n \"bulk_density\": 1.35,\n \"relative_packing_ratio\": 5.87\n }\n res = self.app.get('/model-number', query_string={\"lat\": 37.826194, \"lon\": -122.420930, \"uuid\": uuid})\n self.assertEqual(res.status_code, 200)\n model_number = json.loads(res.data.decode('utf-8'))\n self.assertEqual(expected_model_number, model_number)\n\n res = self.app.get('/model-parameters', query_string={\"number\": model_number})\n self.assertEqual(200, res.status_code)\n self.assertEqual(expected_model_params, json.loads(res.data.decode('utf-8')))", "def load(self, name=\"\"):\n\n self.constructed = True\n if name == \"\":\n name = \"/home/unai/Escritorio/MultiNetwork/model/model\"\n\n network_descriptors = {\"Generic\": GenericDescriptor, \"Decoder\": DecoderDescriptor, \"Discrete\": DiscreteDescriptor, \"Convolution\": ConvolutionDescriptor}\n\n if not os.path.isfile(name):\n print(\"Error at loading the model\")\n return None\n\n f = open(name, \"r+\")\n\n lines = f.readlines()\n\n i = 0\n while lines[i] != \"\\n\": # Each component is stored in a line\n ident, n_inp, kind, n_hidden, layers, init, act, cond_rand, taking, producing, depth, reachable, belows = lines[i][:-1].split(\"_\")\n kwargs = {}\n if int(ident[1:]) > self.last_net:\n self.last_net = int(ident[1:])\n\n self.reachable[ident] = reachable.split(\",\")\n self.comps_below[ident] = belows.split(\",\")\n\n if \"onv\" in kind: # Not working right now\n filters, sizes, layers, strides = layers.split(\"*\")\n sizes = sizes.split(\",\")\n s = np.array([[int(sz) for sz in szs.split(\"/\")] for szs in sizes])\n desc = network_descriptors[kind](int(inp), int(outp), int(n_inp), layers.split(\",\"), filters.split(\",\"), [int(x) for x in strides.split(\",\")], s, [int(x) for x in act.split(\",\")], [int(x) for x in init.split(\",\")], kwargs)\n else:\n if len(kwargs) > 0: # Not working right now\n kwargs = kwargs.split(\"-\")\n kwargs[0] = [int(x) for x in kwargs[0].split(\".\") if len(x) > 0]\n kwargs[1] = [int(x) for x in kwargs[1].split(\".\") if len(x) > 0]\n if len(cond_rand) > 0:\n cond_rand = cond_rand.split(\"-\")\n cond_rand[0] = [int(x) for x in cond_rand[0].split(\",\") if len(x) > 0]\n cond_rand[1] = [int(x) for x in cond_rand[1].split(\",\") if len(x) > 0]\n kwargs[\"conds\"] = cond_rand\n desc = network_descriptors[kind](int(taking.split(\",\")[0]), int(producing.split(\",\")[0]), int(n_inp), int(n_hidden), [int(x) for x in layers.split(\",\") if x != \"-1\"], init_functions[[int(x) for x in init.split(\",\") if x != \"-1\"]],\n act_functions[[int(x) for x in act.split(\",\") if x != \"-1\"]], **kwargs)\n\n # print(\"ident\", ident, \"n_inp\", n_inp, \"kind\", kind, \"inp\", inp, \"outp\", outp, \"layers\", 
layers, \"init\", init, \"act\", act, \"taking\", taking, \"producing\", producing, \"depth\", depth, \"kwargs\", kwargs)\n net = NetworkComp(desc, InOut(size=int(taking.split(\",\")[0]), data_type=taking.split(\",\")[1]), InOut(data_type=producing.split(\",\")[1], size=int(producing.split(\",\")[0])), int(depth))\n\n self.add_net(net, ident)\n i += 1\n\n i += 1\n\n while lines[i] != \"\\n\": # Inputs\n\n ident, size, kind, depth = lines[i].split(\"_\")\n\n self.inputs[ident] = ModelComponent(None, InOut(size=int(size), data_type=kind), int(depth))\n i += 1\n\n i += 1\n\n while lines[i] != \"\\n\": # Outputs\n\n ident, size, kind, depth, belows = lines[i].split(\"_\")\n\n self.outputs[ident] = ModelComponent(InOut(size=int(size), data_type=kind), None, int(depth))\n self.comps_below[ident] = belows.split(\",\")\n i += 1\n\n i += 1\n\n while i < len(lines): # Connections\n name, inp, outp, kind, size = lines[i].split(\"_\")\n\n if int(name[1:]) > self.last_con:\n self.last_con = int(name[1:])\n\n self.connections[name] = Connection(inp, outp, InOut(kind, int(size)), name)\n i += 1\n self.update_below()", "def test_valid_model(self):\n model_cls = ModelContainer(APP_LABEL, TestModel2._meta.db_table).model_cls\n self.assertTrue(model_cls.__class__.__name__ is models.Model.__class__.__name__)", "def test_generate_data_model():\n params = dict(name=\"test\", type_=str, is_required=True)\n\n data_model = DataModel(\"test\", [Attribute(**params)])\n\n assert generate_data_model(\"test\", {\"test\": \"str\"}) == data_model", "def test_xray_classifier():\n model = X_ray_Classifier()\n assert type(model) == X_ray_Classifier", "def __init__(self,\n modeltype='TLusty'):\n if modeltype == 'TLusty':\n self.modtype = 'TLusty_v10'\n self.filebase = 'T*v10_z*.dat'\n self.path = '/home/kgordon/Dust/Ext/Model_Standards_Data/'\n self.read_tlusty_models(self.filebase, self.path)\n else:\n print('model type not supported')\n exit()", "def make_melon_type_lookup(melon_types):\n\n # Fill in the rest", "def test_type(self):\n self.assertEqual(type(self.base1), BaseModel)\n self.assertEqual(type(self.base2), BaseModel)", "def models(r, model):\n\n\tif model==\"PREM\":\n\t\treturn model_prem(r)\n\n\telif model==\"PREM_iso\":\n\t\treturn model_prem_iso(r)\n\n\telif model==\"ONELAYER\":\n\t\treturn model_onelayer(r)\n\n\telif model==\"ONELAYER_pert\":\n\t\treturn model_onelayer_pert(r)\n\n\telif model==\"GUTENBERG\":\n\t\treturn model_gutenberg(r)", "def test_type_of_loaded_sq_objs(self):\n self.s0.save_to_file([self.r0])\n objs = self.s1.load_from_file()\n self.assertEqual(type(objs[0]), Square)", "def test_model_info_basic():\n model = ModelInfo('test description', {'f1': 0.9},\n BaseLocation('protoc://something:8080/thingy'))\n assert 'test description' in model.items['description']\n assert model.items['metrics']['f1'] == 0.9\n assert model.items['location'].get_host() == 'something:8080'\n assert model.items['hash'] is not None", "def _deserialize(self, data):\n uri = data[1:-1]\n # We have to retrieve the type to rebuild the object\n attr = self.__dict__['field']\n # Be careful when orig = None !!!!!\n orig = getattr(attr.model, attr.name)\n if None == orig:\n return rdfSubject(rdflib.term.URIRef(uri))\n elif isinstance(orig, list):\n # rdfalchemy mapper gives me the solution\n rt = attr.model.__class__.__dict__[attr.name].range_type\n from rdfalchemy.orm import mapper\n alch_map = mapper()\n try:\n cls = alch_map[str(rt)]\n return cls(rdflib.term.URIRef(uri))\n except:\n rdfSubject(rdflib.term.URIRef(uri))\n 
else:\n return type(orig)(rdflib.term.URIRef(uri))", "def test_core_object_types_global():\n for core_object_type in CORE_OBJECT_TYPES:\n core_object = get_object_from_string(core_object_type)\n assert core_object.__name__.lower() == core_object_type" ]
[ "0.57247853", "0.56253564", "0.54596573", "0.535612", "0.53386253", "0.52941513", "0.5280008", "0.5215472", "0.509851", "0.5070356", "0.50533646", "0.49833405", "0.49626672", "0.49501526", "0.49251255", "0.49251255", "0.49180833", "0.49138975", "0.4904311", "0.49033374", "0.4897217", "0.48779684", "0.4862627", "0.48506135", "0.48492086", "0.48394668", "0.4835996", "0.48312083", "0.48277938", "0.48247272" ]
0.67043126
0
Splits document text into a list of sentences, given some model.
def get_sentences_list(text: str, model_type: str) -> t.List[str]:\n    sentences = []\n    sent_offsets = []\n    stok = SentenceTokenizer.from_type(model_type)\n    if isinstance(text, list):\n        sentences, sent_offsets = list(zip(*map(stok.tokenize, text)))\n    elif isinstance(text, str):\n        sentences, sent_offsets = stok.tokenize(text)\n    return sentences, sent_offsets
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split(text):\n doc = nlp(text)\n sentences = [x.text_with_ws for x in doc.sents]\n return sentences", "def get_sentences(text):\n \n return text.split('.')", "def sentence_split(self, text):\n return split_into_sentences(text)", "def split_sentences(self, text):\n assert isinstance(text, str)\n text = text.replace('\\n', '')\n\n if text.strip() == '':\n return []\n\n output = self._annotate(text, properties={\n \"annotators\": \"tokenize,ssplit\",\n \"coref.md.type\": \"dep\",\n \"coref.mode\": \"statistical\"\n })\n\n sentences = []\n for sentence in output['sentences']:\n num_token = len(sentence['tokens'])\n start_index = sentence['tokens'][0]['characterOffsetBegin']\n end_index = sentence['tokens'][num_token - 1]['characterOffsetEnd']\n sentences.append(text[start_index:end_index])\n return sentences", "def sentences(self) -> List[str]:\n\t\treturn [sentence for sentence in re.split('(?<=[.!?])', self.text)]", "def split_sentences(text):\n # sentence_delimiters = re.compile(r'(?<!\\w\\.\\w.)(?<![A-Z][a-z]\\.)(?<=\\.|\\?)\\s')\n # sentences = sentence_delimiters.split(text)\n sentences = nltk.sent_tokenize(text)\n return sentences", "def split_sentences(text: str) -> List[str]:\n return sent_tokenize(text)", "def extract_sentences_from_text(self, text_data):\n pass", "def get_sentences(text, nlp):\n\n # get sentences from text\n sentences = [sentence for sentence in\n text.replace('!', '.').replace('?', '.').split('.')]\n\n processed_sentences = [convert_to_string(remove_junk(tokenize_text(sentence, nlp))) for sentence in\n text.replace('!', '.').replace('?', '.').split('.')]\n\n # convert the sentences into a list of document vectors\n sentence_vector_list = [nlp(sentence).vector for sentence in processed_sentences]\n\n return sentences, sentence_vector_list", "def splitInSentence(self,text):\n return self._support.splitInPhrase(text)", "def sentences(self, text):\n if not self.__isValidInput(text):\n return [Sentence(text, Sentence.NONE)]\n\n uniText = unicode_str(text)\n result = []\n textLen = len(uniText)\n sentenceLen = c_size_t()\n position = 0\n while textLen > 0:\n sentenceType = self.__lib.voikkoNextSentenceStartUcs4(\n self.__handle,\n uniText[position:],\n textLen,\n byref(sentenceLen),\n )\n sentenceText = uniText[position:position + sentenceLen.value]\n result.append(Sentence(sentenceText, sentenceType))\n if sentenceType == Sentence.NONE:\n break\n position = position + sentenceLen.value\n textLen = textLen - sentenceLen.value\n return result", "def tokenize_text(document, nlp):\n\n return [token.text for token in nlp(document)]", "def split_into_sentences(text):\n sentences = re.findall('([^\\?\\.\\!]*[\\?\\.\\!]*)', text)\n trimmed_sentences = [x.strip() for x in sentences]\n return [x for x in trimmed_sentences if x]", "def segment(text: str, model: str = \"attacut-sc\") -> List[str]:\n if not text or not isinstance(text, str):\n return []\n\n _tokenizer = AttacutTokenizer(model)\n\n return _tokenizer.tokenize(text)", "def process_text(model_name: str, text: str) -> spacy.tokens.Doc:\r\n nlp = load_model(model_name)\r\n return nlp(text)", "def ie_preprocess(document):\n sentences = nltk.sent_tokenize(document) #NLTK default sentence segmenter\n #print sentences # sentences are segmented\n sentences = [nltk.word_tokenize(sent) for sent in sentences] # NLTK word tokenizer \n #print sentences # sentences are tokenized\n sentences = [nltk.pos_tag(sent) for sent in sentences] # NLTK POS tagger \n #print sentences # sentences are POS tagged\n return sentences", "def 
sentencing(any_text, nlp):\n nlp.add_pipe(nlp.create_pipe('sentencizer'))\n doc = nlp(any_text)\n sentences = [sent.string.strip() for sent in doc.sents]\n return sentences", "def make_sentences(comment):\n sentences = [sent for sent in split_single(comment)]\n return sentences", "def get_sentences(self):\n return [s for s in self.text.split('\\n')]", "def process_token_sentence(text):\n\n sentences = nltk.sent_tokenize(text)\n tokenized_sentences = [nltk.word_tokenize(sentence) for sentence in sentences]\n tagged_sentences = [nltk.pos_tag(sentence) for sentence in tokenized_sentences]\n sentences = nltk.ne_chunk_sents(tagged_sentences, binary=True)\n\n return sentences", "def calculate_texts(self) -> None:\n texts = []\n for text in self.texts:\n paragraphs = list(filter(lambda x: x != \"\", text.split(\"\\n\\n\")))\n for paragraph in paragraphs:\n text = paragraph.replace(\"\\n\", \" \").strip()\n if len(text) > self.split_threshold_min:\n text_sentences = nlp(text)\n sentences = []\n for sentence in text_sentences.sents:\n current = sentence.text\n sentences.append(current.strip())\n texts.extend(sentences)\n else:\n texts.append(text)\n self.texts = list(set(texts))", "def split_doc_into_sentences(doc: Doc) -> List[Span]:\n return [s\n for s in doc.sents\n if len(s.text.strip()) > 0]", "def tokenize(text):\n sentence = Sentence(text)\n return sentence.tokens()", "def split_text_into_sentences(text: str, language: str='es') -> List[str]:\n if not language in ACCEPTED_LANGUAGES[language]:\n raise ValueError(f'Language {language} is not supported yet')\n\n nlp = spacy.load(language, disable=['tagger', 'parser', 'ner'])\n nlp.add_pipe(nlp.create_pipe('sentencizer'))\n text_spacy = nlp(text)\n return [str(sentence) for sentence in text_spacy.sents]", "def split_sentences(text):\n sentenceEnders = re.compile(r\"\"\"\n # Split sentences on whitespace between them.\n (?: # Group for two positive lookbehinds.\n (?<=[.!?]) # Either an end of sentence punct,\n | (?<=[.!?]['\"]) # or end of sentence punct and quote.\n ) # End group of two positive lookbehinds.\n (?<! Mr\\. ) # Don't end sentence on \"Mr.\"\n (?<! Mrs\\. ) # Don't end sentence on \"Mrs.\"\n (?<! Jr\\. ) # Don't end sentence on \"Jr.\"\n (?<! Dr\\. ) # Don't end sentence on \"Dr.\"\n (?<! Prof\\. ) # Don't end sentence on \"Prof.\"\n (?<! Sr\\. ) # Don't end sentence on \"Sr.\"\n (?<! Sen\\. )\n (?<! Ms\\. )\n (?<! Rep\\. )\n (?<! Gov\\. )\n \\s+ # Split on whitespace between sentences.\n \"\"\", re.IGNORECASE | re.VERBOSE)\n sentenceList = sentenceEnders.split(text)\n st_index = [0]\n for s in sentenceEnders.finditer(text):\n st_index.append(s.start())\n return sentenceList, st_index", "def sentences(self) -> List[str]:\n\t\treturn [self.text[start:end] for start, end in self.tokenizations]", "def sentences(self) -> List[str]:\n\t\treturn [self.text[start:end] for start, end in self.tokenizations]", "def doc2sentence(doc):\n sentences = doc.split('\\n')\n sentences = list(filter(lambda sentence: sentence not in(\"\", \" \", \"\\n\"), sentences))\n return sentences", "def split_to_sentences(data):\r\n sentences = data.split(\"\\n\")\r\n \r\n sentences = [s.strip() for s in sentences]\r\n sentences = [s for s in sentences if len(s) > 0]\r\n \r\n return sentences", "def sentences(self, text):\n return re.findall(r'([A-Z][^\\.!?]*[\\.!?])', text)" ]
[ "0.7736216", "0.7252213", "0.7082826", "0.70524997", "0.6805155", "0.67981815", "0.6738876", "0.6737422", "0.67177343", "0.6689931", "0.666789", "0.66164446", "0.65894055", "0.6582415", "0.6578306", "0.655155", "0.65042615", "0.6432492", "0.641784", "0.638252", "0.638045", "0.637512", "0.6371939", "0.63644135", "0.63616014", "0.63573486", "0.63573486", "0.6352766", "0.6342963", "0.63074857" ]
0.75723946
1
creates a vertical line in the diagram, reaching from the x-axis to the plot at a given time t
def create_time_line(self, axes, t, y, time_value, label):\n    # don't create lines on the very left\n    if time_value == t[-1]:\n        return\n\n    # create timeLine\n    time_line = Line2D([time_value, time_value],\n                       [np.min(y), y[t.index(time_value)]],\n                       ls=self.line_style,\n                       c=self.line_color)\n    axes.add_line(time_line)\n    axes.text(time_value + self.spacing,\n              self.label_posistions[self.label_counter],\n              label,\n              size=self.font_size)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vertical_line(t, n):\n lt(t)\n fd(t,n)\n rt(t)", "def plotvertlines(self, ax, time, color='k', label=None):\n if isinstance(time, list):\n t = time.pop()\n ax = self.plotvertlines(ax, t, color=color)\n\n # plot vertical lines of 'predicted' onset/offset\n ax.axvline(time,\n color=color,\n linestyle='dashed',\n linewidth=10, label=label)\n return ax", "def line():\n tt.left(90)\n tt.down()\n tt.forward(50)\n tt.up()\n tt.right(90)\n tt.forward(10)\n tt.right(90)\n tt.forward(50)\n tt.left(90)", "def addTcline(self, line, temperature=None):\n self._checkfigure()\n ld = self._get_linedict(line)\n line = ld['line']\n if temperature is None:\n temperature = ld['Tc']\n color = line.get_c()\n axvl = self.axes.axvline(temperature, color=color, ls='--')\n # self.axvlines.append(axvl)\n ld['vlines'].append(axvl)", "def draw_spike_times(spike_times):\n for line in spike_times:\n plt.axvline(x=line, color='y')", "def horizontal_line(t,n, h):\n lt(t)\n pu(t)\n fd(t,h)\n pd(t)\n lt(t)\n fd(t,n)\n rt(t)", "def vline(self, x, y, height, color):\n self.rect(x, y, 1, height, color, fill=True)", "def _timeseries_scatter_plot_lines(axes):\n axes.axvline(\n x=0,\n ymin=-1000,\n ymax=1000,\n color=\"grey\",\n linestyle=\"dotted\",\n alpha=0.6,\n )\n axes.axhline(\n y=0,\n xmin=-1000,\n xmax=1000,\n color=\"grey\",\n linestyle=\"dotted\",\n alpha=0.6,\n )", "def add_vert_lines(self, timestamp: datetime = None) -> None:\n self._logger.debug(\"running\")\n for axes in self.figure.get_axes():\n if timestamp:\n self._vlines.append(timestamp)\n axes.axvline(timestamp)\n self.refresh_self()\n else:\n for line in self._v_lines:\n axes.axvline(line)\n self._logger.debug(\"done\")", "def plt_tide(pt_tide, time_index, start_date, end_date):\n if pt_tide[start_date:end_date].size < time_index:\n raise ValueError('time_index out of specified date range')\n # sub_selected time for vertical bar, as chosen by time_index\n time = pt_tide[start_date:end_date].index[time_index]\n # note conversion to meters\n plt.plot(pt_tide[start_date:end_date]*0.3048)\n max = 3.5\n min = -0.5\n plt.hold('True')\n # plot vertical line at location specified in time\n plt.plot([time, time], [min, max])\n # Clean up and label axes\n plt.ylabel('Elevation [m]')\n plt.gca().xaxis.tick_top()", "def plot_vel_ts(self, variable, ax, **kwargs):\r\n seg = self.rec_seg\r\n nseg = self.nsegments\r\n if variable is 'speed_over_ground':\r\n u = seg.u\r\n v = seg.v\r\n\r\n elif variable is 'smoothed_vel':\r\n smooth = self.rec_smooth\r\n u = smooth.us\r\n v = smooth.vs\r\n time_from_entry = seg.tm - self.t_entry\r\n kwargs['linestyle']='-'\r\n kwargs['linewidth']=0.5\r\n kwargs['markersize']=0.8\r\n ax.plot(time_from_entry, seg.u, marker='o',color='c', \r\n label='u',**kwargs)\r\n ax.plot(time_from_entry, seg.v, marker='s',color='b', \r\n label='v',**kwargs)\r\n ax.set_xlabel('time (seconds)')\r\n ax.set_ylim([-2, 2])\r\n ax.set_yticks([-2, -1, 0, 1, 2])\r\n ax.set_ylabel('velocity (m s$^{-1}$)')\r\n ax.legend(loc='upper right')\r\n\r\n return", "def plot_x(t, x):\n plt.figure()\n plt.plot(t, x)\n plt.title(\"Vertical position of the skydiver as a function of time\")\n plt.xlabel(\"Time t [s]\")\n plt.ylabel(\"Height [m]\")\n plt.savefig('Parachute_position.png')", "def constructTimeLineItem(self):\n\t\treturn", "def plot(self, x, y, color=\"black\"):\n self.__checkOpen()\n xs,ys = self.toScreen(x,y)\n #self.create_line(xs,ys,xs+1,ys, fill=color)\n _tkExec(self.create_line,xs,ys,xs+1,ys,fill=color,tag=\"line\")\n self.__autoflush()", "def 
_draw_line(plot, hori, vert, color, text):\n plot.plot(hori, vert, '-o'+color)\n plot.text(hori[-1]-3, vert[-1]+2, text, color=color)", "def hline(self, x, y, width, color):\n self.rect(x, y, width, 1, color, fill=True)", "def draw_line_plot(fig, x, y, labels):\r\n\r\n #Convert times to a displayable format\r\n (x_times, hour_mode) = times_to_axis(x)\r\n\r\n\r\n #Draw grid lines\r\n fig.grid(zorder=0)\r\n\r\n #Draw plot\r\n fig.plot(x_times, y, \"-\", label=None, zorder=2)\r\n \r\n \r\n #If necessary, enable processing of \"datetime\" objects on the x-axis\r\n if not hour_mode:\r\n fig.xaxis_date()\r\n\r\n\r\n #Label and style plot\r\n set_axis_labels(fig, *labels)\r\n style_x_labels(fig)", "def plot_v(t, v):\n p1 = plt.plot(t,v)\n plt.xlabel('Time [s]')\n plt.ylabel('Velocity [m/s]')\n plt.title('Velocity for the skydiver as a function of time')\n plt.show()\n plt.savefig('Parachute_velocity.png')", "def create_plot(self, item):\n\n title = str(item.text())\n data = self._get_data_by_name(title)\n t = self.currentDataset[\"results\"][\"time\"]\n\n dock = pg.dockarea.Dock(title)\n self.area.addDock(dock, \"above\", self.plotDocks[-1])\n\n widget = pg.PlotWidget(title=title)\n widget.plot(x=t, y=data)\n\n time_line = pg.InfiniteLine(self.playbackTime, angle=90, movable=False, pen=pg.mkPen(\"#FF0000\", width=2.0))\n widget.getPlotItem().addItem(time_line)\n\n # enable grid\n widget.showGrid(True, True)\n\n dock.addWidget(widget)\n\n self.plotDocks.append(dock)\n self.plotWidgets.append(widget)\n self.timeLines.append(time_line)", "def line(self, x, y):\n self.call('line', x, y)", "def timeSpaceDiagramMethod(self):\n fig, ax1 = plt.subplots()\n\n ax1.set_xlabel('Time (s)', fontsize=24, fontweight='bold')\n ax1.set_ylabel('Distance (m)', fontsize=24, fontweight='bold')\n max_x_limit = self.xAxisRange-100\n plt.xlim([0, max_x_limit])\n plt.ylim([0, max(self.distance_Green)+400])\n plt.xticks(np.arange(0, self.xAxisRange-75, 50), fontsize=24)\n ax1.tick_params(axis='y', labelsize=18)\n for axis in ['top', 'bottom', 'left', 'right']:\n ax1.spines[axis].set_linewidth(4)\n # ax1.set_yticks(ticks=np.arange(0, 100, 20),fontsize = 24)\n #newYlabel = ['-400','0','395','810','1225']\n # plt.gca().set_yticklabels(newYlabel)\n # plt.yticks([])\n req_phase_length = len(self.greenRectangleStartPoint)\n for i in range(0, req_phase_length):\n x = self.greenRectangleStartPoint[i]\n y = self.distance_Green[i]\n ax1.add_patch(Rectangle(\n (x, y), self.greenTime[i], 30, angle=0.0, color='green', linewidth=2,))\n\n req_phase_length = len(self.clearanceRectangleStartPoint)\n for i in range(0, req_phase_length):\n x = self.clearanceRectangleStartPoint[i]\n y = self.distance_Clearance[i]\n ax1.add_patch(Rectangle(\n (x, y), self.clearanceTime[i], 30, angle=0.0, color='red', linewidth=2))\n\n\n if len(self.evTrajectoryTimePoint) > 0:\n ax1.scatter(self.evTrajectoryTimePoint, self.evTrajectoryDistancePoint, c=\"black\", linewidths=4,\n marker=\".\", edgecolor=\"none\", s=50, label='Connected Vehicles Trajectory', zorder=2)\n\n if len(self.transitTrajectoryTimePoint) > 0:\n ax1.scatter(self.transitTrajectoryTimePoint, self.transitTrajectoryDistancePoint, c=\"black\",\n linewidths=4, marker=\".\", edgecolor=\"none\", s=50, label='Connected Vehicles Trajectory', zorder=2)\n\n if len(self.truckTrajectoryTimePoint) > 0:\n ax1.scatter(self.truckTrajectoryTimePoint, self.truckTrajectoryDistancePoint, c=\"black\",\n linewidths=4, marker=\".\", edgecolor=\"none\", s=50, label='Connected Vehicles Trajectory', zorder=2)\n\n if 
len(self.carTrajectoryTimePoint) > 0:\n ax1.scatter(self.carTrajectoryTimePoint, self.carTrajectoryDistancePoint, c=\"black\", linewidths=4,\n marker=\".\", edgecolor=\"none\", s=50, label='Connected Vehicles Trajectory', zorder=2)\n\n if len(self.connectedVehicleTrajectoryTimePoint) > 0:\n ax1.scatter(self.connectedVehicleTrajectoryTimePoint, self.connectedVehicleTrajectoryDistancePoint, c=\"black\", linewidths=4,\n marker=\".\", edgecolor=\"none\", s=50, label='Connected Vehicles Trajectory', zorder=2) \n\n ax1.legend(loc='upper right', prop={\"size\": 16})\n ax1.set_title(\"Time-Space Diagram\", fontsize=20, fontweight='bold')\n fig.tight_layout() # otherwise the right y-label is slightly clipped\n plt.grid(color='black', linestyle='-', linewidth=0.5)\n plt.show()", "def _createline(self):\n return self.cv.create_line(0, 0, 0, 0, fill=\"\", width=2,\n capstyle = TK.ROUND)", "def vline(self, x, y, length, color):\n self.fill_rect(x, y, 1, length, color)", "def plot_time(self, X, x0, t):\n\n Pressure = [Solution(self, (x-x0)/t).pressure for x in X]\n Velocity = [Solution(self, (x-x0)/t).velocity for x in X]\n Density = [Solution(self, (x-x0)/t).rho for x in X]\n\n fig, axs = plt.subplots(3, sharex=True)\n fig.suptitle(\"Solution of the Riemann problem\\nat t = {}s\".format(t))\n axs[0].plot(X, Density)\n axs[1].plot(X, Velocity)\n axs[2].plot(X, Pressure)\n\n axs[0].grid()\n axs[0].set(ylabel = \"Density\")\n axs[1].grid()\n axs[1].set(ylabel = \"Velocity\")\n axs[2].grid()\n axs[2].set(ylabel = \"Pressure\")\n\n plt.xlabel(\"Location x\")", "def line_segment(t, # Array of times for each position\n t_now # Current time (float)\n ):\n i = np.argwhere(t > t_now)\n if len(i) > 0:\n if i[0] != 0: # if the current time is not less than the starting time\n segment_starting_index = i[0][0] - 1\n else:\n segment_starting_index = 0\n\n segment_end_index = i[0][0]\n\n else: # if the current time is more than the last point (destination) time\n segment_starting_index = t.shape[0]\n segment_end_index = t.shape[0]\n\n return segment_starting_index, segment_end_index", "def drawAxes(t):\r\n t.speed(0)\r\n t.pd()\r\n t.forward(500)\r\n t.back(500)", "def draw_lines(asr,ax):\n r = asr.value\n y = 475.\n x = (r**2-y**2)**(.5)\n xs = np.linspace(-x,x,10)\n yt = np.zeros(xs.size)+y\n yb = np.zeros(xs.size)-y\n ax.plot(xs,yt,'-.',color='red',alpha=1.,linewidth=2,zorder=5000)\n ax.plot(xs,yb,'-.',color='red',alpha=1.,linewidth=2,zorder=5000)\n return ax", "def plot_trajectories_XYZ(t_start,t_stop):\n \n time, ankle_l_trajectory, ankle_r_trajectory,foot_l_contact,foot_r_contact,muscle_lh_activations, muscle_rh_activations,muscle_lh_forces,muscle_rh_forces,joint_lh_positions,joint_rh_positions = load_data()\n \n index_start = np.where(time == t_start)[0][0]\n index_end = np.where(time == t_stop)[0][0]\n \n time = time[index_start:index_end+1]\n ankle_l_trajectory = ankle_l_trajectory[index_start:index_end+1,:]\n ankle_r_trajectory = ankle_r_trajectory[index_start:index_end+1,:]\n \n #time=np.linspace(1,len(ankle_l_trajectory[:,0]),len(ankle_l_trajectory[:,0]));\n \n plt.figure('Trajectories')\n plt.subplot(311)\n plt.plot(time,ankle_l_trajectory[:,0])\n plt.plot(time,ankle_r_trajectory[:,0])\n #plt.title('Trajectory of the X component')\n plt.xlabel('Time [s]')\n plt.ylabel('X Position [cm]')\n plt.legend(['Left ankle','Right ankle'],loc='upper right')\n \n plt.subplot(312)\n plt.plot(time,ankle_l_trajectory[:,1])\n plt.plot(time,ankle_r_trajectory[:,1])\n #plt.title('Trajectory of the Y component')\n 
plt.xlabel('Time [s]')\n plt.ylabel('Y Position [cm]')\n plt.legend(['Left ankle','Right ankle'],loc='upper right')\n \n plt.subplot(313)\n plt.plot(time,ankle_l_trajectory[:,2])\n plt.plot(time,ankle_r_trajectory[:,2])\n #plt.title('Trajectory of the Z component')\n plt.xlabel('Time [s]')\n plt.ylabel('Z Position [cm]')\n plt.legend(['Left ankle','Right ankle'],loc='upper right')\n \n# plt.suptitle('Decomposition of the trajectories of the hind feet')\n return", "def make_line_plot(data, x_label=\"Data\", y_label=\"Data Point\"):\n\n y = data\n x = range(len(y))\n\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n plt.plot(x, y)\n plt.show()", "def vline(self, value, zorder=1):\n self.ax.axvline(x=value, color=self.color, linestyle=self.lstyle)" ]
[ "0.68938607", "0.6757993", "0.64868546", "0.6453914", "0.6393406", "0.63680446", "0.6361839", "0.6274058", "0.62633324", "0.61774045", "0.6162524", "0.61366385", "0.61149967", "0.604295", "0.5982257", "0.59538084", "0.5947142", "0.59383583", "0.5909579", "0.5906619", "0.58933216", "0.5879278", "0.5858481", "0.5815473", "0.58012253", "0.57879925", "0.5780209", "0.5774768", "0.57392293", "0.5724235" ]
0.7260301
0
Generate a coordinate moved by the provided `shift` parameters from the current Coordinate.
def move_to(self, shift: Move) -> Coordinate:\n    if shift.direction == "U":\n        new_coordinate = Coordinate(x=self.x, y=self.y + shift.dist)\n    elif shift.direction == "D":\n        new_coordinate = Coordinate(x=self.x, y=self.y - shift.dist)\n    elif shift.direction == "L":\n        new_coordinate = Coordinate(x=self.x - shift.dist, y=self.y)\n    elif shift.direction == "R":\n        new_coordinate = Coordinate(x=self.x + shift.dist, y=self.y)\n    else:\n        raise ValueError(f"Unknown direction: '{shift.direction}'")\n\n    return new_coordinate
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shifted(self, shift):\n new_location = None if self.location is None else self.location + shift\n reference = None if self.reference is None else self.reference + shift\n return self.copy_with_changes(\n location=new_location, reference=reference, derived_from=self,\n )", "def Translate(shift_x, shift_y): \n shifted = numpy.matrix([[1.0, 0.0, 1.0 ],\n [0.0, 1.0, 1.0 ],\n [shift_x, shift_y, 1.0 ]]) \n return shifted", "def shift(image,shift_x,shift_y):\n return np.roll(np.roll(image,shift_y,axis=0),shift_x,axis=1)", "def _shift(self, s):\n start_pos = self._relative_head_pos()\n l = 1 + 2 * self.shift_length\n shift = int(s * l - 0.000000001) - int(l / 2)\n for s in range(abs(shift)):\n if shift > 0:\n if self.head_pos == len(self.memory) - 1 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((self.memory, np.zeros((1, self.memory_unit_size))), 0)\n self.head_pos += 1\n else:\n self.head_pos = (self.head_pos + 1) % self.max_memory\n else:\n if self.head_pos == 0 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((np.zeros((1, self.memory_unit_size)), self.memory), 0)\n self.left_expands += 1\n else:\n self.head_pos = (self.head_pos - 1) % self.max_memory\n if self.history is not None:\n self.history[\"loc\"][-1].append((start_pos, 0.1))\n return np.sign(shift)", "def shift(self, direction):\n direct, pos = tuple(direction)\n\n board = {'L': self.rows, 'R': self.rows, 'D': self.cols, 'U': self.cols}[direct]\n board[int(pos)].shift(direction=self.direct[direct])", "def shift(self, da, dim, shift):\n # TODO: generalize rolling function, allow custom shifts, handle\n # boundary conditions, etc.\n return da.roll(**{dim: shift})", "def shift(self):\n r = self.std\n mid = self.mid_pixel #center pixel index of 384x384 image\n delta = self.size - self.mid_pixel - r\n \n x = np.random.randint(low=-1*delta,high=delta,size=1)[0]\n y = np.random.randint(low=-1*delta,high=delta,size=1)[0]\n\n self.x += x\n self.y += y\n image_shift = np.roll(self.image,shift=x,axis=0)\n self.image = np.roll(image_shift,shift=y,axis=1)\n \n return", "def move(coord, direction):\n vInc, hInc = dirToIncrement(direction)\n return (coord[0]+vInc, coord[1]+hInc)", "def shift(self, coord, y=None):\n assert isinstance(coord, Coordinate) or isinstance(y, AxisDistance), \"incorrect type of arg coord: should be Coordinate or AxisDistance, is {}\".format(type(coord))\n if y is not None: coord = Coordinate(coord, y)\n self.x += coord.x\n self.y += coord.y\n return self", "def shift_by(self, xshift):\n return Waveform(self.xvec + xshift, self.yvec, self.xtol, order=self.order, ext=self.ext)", "def translateX(shift: float) -> Callable:\n return lambda img: TF.affine(img, 0, (shift * img.size[0], 0), 1, 0)", "def shift(self, shift_vector, inplace=False):\n if len(shift_vector) != len(self.vertex):\n raise GGeoError('Shift vector length must equal geometry rank.')\n\n if inplace:\n self.vertex = tuple([a+b for a,b in zip(self.vertex, shift_vector)])\n return self\n else:\n vertex = tuple([a+b for a,b in zip(self.vertex, shift_vector)])\n return Point(vertex, properties=self.properties, crs=self.crs)", "def shifted(self, shift_by):\n return self - shift_by", "def shift_timestamp(self, shift, timestamp):\n\n shift_msecs = self.convert_shift_to_msecs(shift)\n timestamp_msecs = self.convert_timestamp_to_msecs(timestamp)\n shifted_timestamp_msecs = timestamp_msecs + shift_msecs\n new_timestamp = self.convert_msecs_to_timestamp(shifted_timestamp_msecs)\n\n return new_timestamp", "def 
shift_coordinate_grid(self, pm_coord, shift_epoch):\n\n # Replace pixel data / WCS with copy centred on source\n contour_background = ContourCutout(\n self.survey,\n pm_coord,\n self.size,\n band=self.band,\n )\n self.data = contour_background.data\n self.wcs = contour_background.wcs\n \n # Astropy for some reason can't decide on calling this pm_ra or pm_ra_cosdec\n try:\n pm_ra = pm_coord.pm_ra\n except AttributeError as e:\n pm_ra = pm_coord.pm_ra_cosdec\n\n # Update CRVAL coordinates based on propagated proper motion\n orig_pos = SkyCoord(\n ra=self.wcs.wcs.crval[0] * u.deg,\n dec=self.wcs.wcs.crval[1] * u.deg,\n frame='icrs',\n distance=pm_coord.distance,\n pm_ra_cosdec=pm_ra,\n pm_dec=pm_coord.pm_dec,\n obstime=pm_coord.obstime,\n )\n newpos = orig_pos.apply_space_motion(shift_epoch)\n\n self.wcs.wcs.crval = [newpos.ra.deg, newpos.dec.deg]\n\n return", "def shift_world(self, shift_x):\n\n # Keep track of the shift amount\n self.world_shift += shift_x\n self.left_x -= shift_x\n self.ori_X += shift_x\n\n # Go through all the sprite lists and shift\n for platform in self.platform_list:\n platform.rect.x += shift_x\n\n for platform in self.decorLayer:\n platform.rect.y += shift_x\n\n for platform in self.platform_fallthrough:\n platform.rect.x += shift_x\n\n for platform in self.platform_quicksand:\n platform.rect.x += shift_x\n\n for platform in self.platform_slime:\n platform.rect.x += shift_x\n\n for platform in self.platform_choose:\n platform.rect.x += shift_x\n\n for enemy in self.enemy_list:\n enemy.rect.x += shift_x\n\n for end in self.end_blocks:\n end.rect.x += shift_x\n\n print(self.ori_X)", "def address_shift(self, shift):\n return self.player_address + shift", "def shift(self):\n return self._shift", "def MoveTo(self, x, y):\n return _Terminal.move % (y,x)", "def test_shift_point(self):\n point = (0,0)\n new_point = utils.shift_point(point, 3, 4)\n self.assertEqual((3,4), new_point)\n\n point = (-2.34, 1.19)\n new_point = utils.shift_point(point, 2.34, -1.19)\n self.assertEqual((0,0), new_point)", "def move_by(self, increment):\n return self.move_to(self.position + increment)", "def shift_with_extension(image, shift):\n\n # Get the size of the image\n image_size = image.size[0]\n\n # Get the x and y shifts\n x, y = shift\n\n # Get the maximum value to shift by\n max_shift = max(abs(x), abs(y))\n\n # Perform the mirror extension of the original image by the correct amount\n extend = mirror_extend(max_shift, image)\n\n # Determine the left right upper and lower indices of the box to crop\n left = max_shift + y\n right = left + image_size\n upper = max_shift + x\n lower = upper + image_size\n\n # Crop the image\n return extend.crop((left, upper, right, lower))", "def __add__(self, coord):\n assert isinstance(coord, Coordinate), \"incorrect type of arg coord: should be Coordinate, is {}\".format(type(coord))\n return self.shift(coord)", "def shift_point(mutated_genome,index):\n Xval = random.randint(-int(imagewidth*0.1),int(imagewidth*0.1))\n Yval = random.randint(-int(imageheight*0.1),int(imageheight*0.1))\n point_index = random.randint(0,max(0,len(mutated_genome[index][2])-1))\n point = mutated_genome[index][2][point_index]\n newpoint = (point[0]+Xval,point[1]+Yval)\n mutated_genome[index][2][point_index] = newpoint", "def get_shift() -> int:\n\treturn random.randint(low = -1 *SHIFT_MAX_VAL, high = SHIFT_MAX_VAL)", "def move_to(self, x, y):\n return _Terminal.move % (y, x)", "def timeshift(self, shift='random'):\n\n if shift == 'random':\n one_month = pd.Timedelta('30 
days').value\n two_years = pd.Timedelta('730 days').value\n random_timedelta = - pd.Timedelta(random.uniform(one_month, two_years)).round('min')\n self.timeshift(random_timedelta)\n\n if not self.data.index.empty:\n if isinstance(shift, pd.Timestamp):\n timedeltas = self.data.index - self.data.index[0]\n self.data.index = shift.round('min') + timedeltas\n if isinstance(shift, pd.Timedelta):\n self.data.index += shift.round('min')\n self.data['date'] = self.data.index.map(lambda timestamp: timestamp.date())\n self.data['time'] = self.data.index.map(lambda timestamp: timestamp.time())\n else:\n if isinstance(shift, pd.Timestamp):\n timedeltas = self.data['timestamp'] - self.data['timestamp'].min()\n self.data['timestamp'] = shift.round('min') + timedeltas\n if isinstance(shift, pd.Timedelta):\n self.data['timestamp'] += shift.round('min')\n self.data['date'] = self.data['timestamp'].map(lambda timestamp: timestamp.date())\n self.data['time'] = self.data['timestamp'].map(lambda timestamp: timestamp.time())", "def _coordinate_after_moving(self, direction, coordinate):\n\n if direction == 'N':\n new_coordinate = Coordinate(coordinate.x, coordinate.y + 1)\n elif direction == 'S':\n new_coordinate = Coordinate(coordinate.x, coordinate.y - 1)\n elif direction == 'W':\n new_coordinate = Coordinate(coordinate.x - 1, coordinate.y)\n else:\n new_coordinate = Coordinate(coordinate.x + 1, coordinate.y)\n\n if not self._is_coordinate_in_the_grid(new_coordinate):\n raise RoverException(ExceptionMessages.OFF_GRID)\n\n if self._is_coordinate_occupied(new_coordinate):\n raise RoverException(ExceptionMessages.ROVER_COLLISION)\n\n return new_coordinate", "def get_new_coordinate(x_y_coordinate: dict, move_direction: str) -> tuple:\n direction_dict = {'n': (0, -1), 's': (0, 1), 'w': (-1, 0), 'e': (1, 0)}\n x = x_y_coordinate['x'] + direction_dict[move_direction][0]\n y = x_y_coordinate['y'] + direction_dict[move_direction][1]\n return x, y", "def shift_world_x(self, shift_x):\n\n # Keep track of the shift amount\n self.world_shift_x += shift_x\n\n # Go through all the sprite lists and shift\n for platform in self.platform_list:\n platform.rect.x += shift_x\n\n for enemy in self.enemy_list:\n enemy.rect.x += shift_x\n # shift x for projectiles\n if enemy.total_snowballs > 0:\n snowballs = enemy.snowballGroup.sprites()\n for ball in snowballs:\n ball.rect.x += shift_x\n if enemy.numOfDarts > 0:\n darts = enemy.dartGroup.sprites()\n for dart in darts:\n dart.rect.x += shift_x\n\n for exit_door in self.exit_sprite:\n exit_door.rect.x += shift_x\n\n for bag in self.bagGroup:\n bag.rect.x += shift_x" ]
[ "0.70026237", "0.6079565", "0.59989953", "0.5997494", "0.59189904", "0.58717406", "0.5802957", "0.57956624", "0.5785818", "0.5776062", "0.573016", "0.5704016", "0.5698707", "0.56430995", "0.56295216", "0.5617708", "0.5577262", "0.5573564", "0.553183", "0.54844093", "0.54531276", "0.54447865", "0.54377705", "0.54191095", "0.54073316", "0.5391843", "0.53627706", "0.53547096", "0.53513706", "0.5348221" ]
0.7632833
0
Return True if segments share start & end points, accounting for flipping.
def __eq__(self, other: Segment) -> bool:\n    return any(\n        (\n            self.start == other.start and self.end == other.end,\n            self.start == other.end and self.end == other.start,\n        )\n    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def does_overlap(self, start, stop):\n\n ranges = [list(range(key, self.map[key] + 1)) for key in self.map]\n all_coords = [item for sublist in ranges for item in sublist]\n # removing all_coords implementation until we write some tests\n for i in range(start, stop + 1):\n if i in all_coords:\n return True\n return False", "def is_overlapping(segment_time, previous_segments):\n \n segment_start, segment_end = segment_time\n overlap = False\n for previous_start, previous_end in previous_segments:\n if previous_start<=segment_start<=previous_end or previous_start<=segment_end<=previous_end:\n overlap = True\n\n return overlap", "def present_in_slice(self, start, stop):\n return self.starts_before(start) and self.ends_after(stop - 1)", "def overlaps(self, other):\n\n if self.start.equal(other.start) or self.stop.equal(other.stop):\n return True\n elif self.start.before(other.start) and self.stop.after(other.start):\n return True\n elif other.stop.after(self.start) and other.stop.before(self.stop):\n return True\n else:\n return False", "def intersects(self):\n match = False\n for i in range(len(self.__points) - 1):\n p1 = self.__points[i]\n p2 = self.__points[i + 1]\n bounds = self.__line_segment(p1, p2)\n if not bounds is None:\n xmin = bounds[0]\n ymin = bounds[1]\n xmax = bounds[0]\n ymax = bounds[1]\n for j in range(len(bounds)):\n if not (j % 2):\n if bounds[j] < xmin:\n xmin = bounds[j]\n elif bounds[j] > xmax:\n xmax = bounds[j]\n else:\n if bounds[j] < ymin:\n ymin = bounds[j]\n elif bounds[j] > ymax:\n ymax = bounds[j]\n x = self.x\n y = self.y\n # TODO: Determine direction, and check two leading edge points; ie. last vector ----> then points are x+width,y+width x+width,y-width\n if x > xmin and x < xmax and y > ymin and y < ymax:\n match = True\n break\n return match", "def counterSeg(self, x, y, X, Y):\n if self.segs == []:\n return False\n st = self.segs[-1].getStartPoint()\n end = self.segs[-1].getEndPoint()\n return st == (X, Y) and end == (x, y)", "def intersects_segment(\n self, a: Tuple[float, float], b: Tuple[float, float]\n ) -> bool:\n assert len(a) == 2\n assert len(b) == 2\n return bool(lib.cpBBIntersectsSegment(self, a, b))", "def can_overlap(self):\n return False", "def overlaps(self, other):\n return self.start <= other.end and self.end >= other.start", "def overlap(component1, component2):\n if component1[0].start <= component2[0].stop and component2[0].start <= component1[0].stop:\n if component1[1].start <= component2[1].stop and component2[1].start <= component1[1].stop:\n return True\n return False", "def can_fix_intersection(self, segment):\n\n points = segment.points\n points = [points[1], points[2], points[3], points[2], points[1], points[0]]\n path = create_path(points)\n layer = GSLayer()\n layer.paths.append(path)\n\n if layer.paths[0].insertNodeWithPathTime_(2.5) is None:\n return False\n for segment in layer.paths[0].segments[:-1]:\n # We need to check only curve segments which consist of four points.\n if len(segment.points) == 4:\n s_t = self.triangle_error_of(segment.points, do_round=True)\n if s_t is not None:\n points = points2vectors(segment.points)\n ok = False\n for s, t in self.calculate_s_t_candidates(points, s_t):\n if self.try_update_points(points, s, t) is not None:\n ok = True\n break\n if not ok:\n return False\n return True", "def segment_segment(s1, s2):\n l1=s1.line()\n l2=s2.line()\n i = line_line(l1, l2)\n if isinstance(i, bool): return False\n k = s1.affine(i)\n return k >= 0 and k <= 1 and i", "def overlap(start1, end1, start2, 
end2):\n return not (end1 < start2 or end2 < start1)", "def is_overlap(self, transposon):\n if self.first <= transposon.last <= self.last:\n return True\n elif self.first <= transposon.first <= self.last:\n return True\n else:\n return False", "def _overlapping(self, atom1, atom2):\n\n if np.linalg.norm(atom1.pos-atom2.pos) < (atom1.rad+atom2.rad):\n return True\n else:\n return False", "def __contains__(self, angle):\n angle = normalize(angle, min(self.start, self.finish), max(self.start, self.finish))\n return (self.start <= angle < self.finish) or (self.finish <= angle < self.start)", "def overlaps(self, other): # -> bool:\n ...", "def overlaps(self, chrom, start, end, strand=None):\n if (self.chrom != chrom \n or min(self.end, end) - max(self.start, start) <= 0 \n or (strand is not None and self.strand != strand)): \n return False\n return True", "def covers(self, other):\n return self._start <= other._start and self._end >= other._end", "def test_does_intersect() -> None:\n\n line_segment_1 = LineSegment(first=Point(1, 1), second=Point(4, 4))\n reversed_segment = LineSegment(first=Point(4, 4), second=Point(1, 1))\n line_segment_2 = LineSegment(first=Point(1, 1), second=Point(-2, -4))\n line_segment_3 = LineSegment(first=Point(3, 3), second=Point(5, 5))\n line_segment_4 = LineSegment(first=Point(1, 0), second=Point(5, -5))\n\n assert line_segment_1.does_intersect_or_touch(reversed_segment)\n assert line_segment_1.does_intersect_or_touch(line_segment_2)\n assert line_segment_1.does_intersect_or_touch(line_segment_3)\n assert not line_segment_1.does_intersect_or_touch(line_segment_4)", "def span_overlap(a: Tuple[int, int], b: Tuple[int, int]) -> bool:\n return not (a[0] > b[1] or a[1] < b[0])", "def fragmented(self) -> bool:\n return not (\n self._begin < self._end or\n self._end == 0\n )", "def dates_intervals_are_overlapped(start_1, end_1, start_2, end_2):\n return end_1 >= start_2 and end_2 >= start_1", "def has_overlap(vevent, start, end):\n event_start = vevent.dtstart.value\n event_end = vevent.dtend.value\n\n assert not is_naive(start), 'start dt is naive'\n assert not is_naive(end), 'end dt is naive'\n assert not is_naive(event_start), 'event_start dt is naive'\n assert not is_naive(event_end), 'event_end dt is naive'\n\n if start <= event_start <= end: # starts today\n return True\n if start <= event_end <= end: # ends today\n return True\n if event_start <= start and end <= event_end: # spans over today\n return True\n return False", "def pointInSegment(point, segmentPoint1, segmentPoint2):\n\t\tx = point[0]\n\t\ty = point[1]\n\n\t\tif x < segmentPoint1[0] and x < segmentPoint2[0]:\n\t\t\treturn False\n\t\t\n\t\tif x > segmentPoint1[0] and x > segmentPoint2[0]:\n\t\t\treturn False\n\t\t\n\t\tif y < segmentPoint1[1] and y < segmentPoint2[1]:\n\t\t\treturn False\n\t\t\n\t\tif y > segmentPoint1[1] and y > segmentPoint2[1]:\n\t\t\treturn False\n\t\t\n\t\treturn True", "def _parallel(*segments):\n if not all(isinstance(s, Line) for s in segments):\n raise TypeError(\"Line._parallel requires all Line objects\")\n\n unique_segments = list(set(segments))\n\n if len(unique_segments) == 0:\n return False\n elif len(unique_segments) == 1:\n return True\n else:\n # take the first segment and translate it to the origin\n first_translated_seg = Line([Point3(0, 0, 0), (segments[0].end - segments[0].start)])\n\n # the given segments are parallel if they are all parallel to the first\n for s in segments[1:]:\n translated_seg = Line([Point3(0, 0, 0), (s.end - s.start)])\n if not 
first_translated_seg.is_collinear_with(translated_seg):\n return False\n\n return True", "def is_intersect(line_a, line_b):\n # Find the four orientations needed for general and special cases\n orientation_1 = orientation(line_a.endpoint_a, line_a.endpoint_b,\n line_b.endpoint_a)\n orientation_2 = orientation(line_a.endpoint_a, line_a.endpoint_b,\n line_b.endpoint_b)\n orientation_3 = orientation(line_b.endpoint_a, line_b.endpoint_b,\n line_a.endpoint_a)\n orientation_4 = orientation(line_b.endpoint_a, line_b.endpoint_b,\n line_a.endpoint_b)\n\n # General case\n if (orientation_1 != orientation_2 and orientation_3 != orientation_4):\n return True\n\n # Special cases\n if (orientation_1 == 0 and on_segment(line_a.endpoint_a, line_b.endpoint_a,\n line_a.endpoint_b)):\n return True\n if (orientation_2 == 0 and on_segment(line_a.endpoint_a, line_b.endpoint_b,\n line_a.endpoint_b)):\n return True\n if (orientation_3 == 0 and on_segment(line_b.endpoint_a, line_a.endpoint_a,\n line_b.endpoint_b)):\n return True\n if (orientation_4 == 0 and on_segment(line_b.endpoint_a, line_a.endpoint_b,\n line_b.endpoint_b)):\n return True\n\n return False", "def seg_x_in_y(self, x: str, y: str) -> bool:\n return len(set(x + y)) == len(y)", "def near_segment(point:tuple, edge:tuple)->bool:\n return between(point[0], edge[0][0], edge[1][0]) and between(point[1], edge[0][1], edge[1][1])", "def ranges_overlap(start1, end1, start2, end2):\n return start1 <= end2 and end1 >= start2" ]
[ "0.6702536", "0.6660691", "0.65623313", "0.65462446", "0.6517798", "0.6488373", "0.64770776", "0.6293366", "0.62791336", "0.62655747", "0.62616754", "0.62436944", "0.6191759", "0.6159941", "0.6140473", "0.6125066", "0.6124695", "0.60969627", "0.60839266", "0.607093", "0.6059383", "0.6053239", "0.60503983", "0.6029029", "0.6014387", "0.5996537", "0.5995709", "0.5988255", "0.59856457", "0.5977027" ]
0.6908634
0
Wire a segment from the end of the current Segment using the provided `shift`.
def wire_to(self, shift: Move) -> Segment:\n    return Segment(self.end, self.end.move_to(shift))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shift(self, shift_vec: np.ndarray) -> None:\n if len(shift_vec) != 3:\n raise ValueError(\"`shift_vec` must be a three dimensional vector\")\n shift = np.argmax(shift_vec) - 1\n self._head += shift\n if self._head < 0:\n new_buffer = np.zeros_like(self._buffer)\n self._buffer = np.concatenate([new_buffer, self._buffer], axis=0)\n self._head += len(new_buffer)\n elif self._head > len(self._buffer) - 1:\n new_buffer = np.zeros_like(self._buffer)\n self._buffer = np.concatenate([self._buffer, new_buffer], axis=0)", "def shift(self, delay):\n self.__begin.shift(delay)\n self.__end.shift(delay)", "def shifted(self, shift):\n new_location = None if self.location is None else self.location + shift\n reference = None if self.reference is None else self.reference + shift\n return self.copy_with_changes(\n location=new_location, reference=reference, derived_from=self,\n )", "def cg_push_segment(self, cmd):\n assert(cmd.arg1 in self.config.segments)\n base_ptr = self.config.segments[cmd.arg1]\n offset = int(cmd.arg2)\n self.asm(unindent(f\"\"\"\n @{base_ptr}\n D=M // {cmd.arg1} base ptr: {base_ptr}\n @{offset}\n A=D+A // base {base_ptr} + offset {offset}\n D=M // D = *(addr)\n {self._cg_push_D}\n \"\"\"))", "def extend(self):\n # -1 in the segments means that starts counting in the end of the list\n self.add_segment(self.segments[-1].position())", "def addShift(self,shift):\n self.shifts.append(shift)", "def addShift(self,shift):\n self.shifts.append(shift)", "def wr_dr(self, wr):\n tdi = bits.bits()\n tdi.append_ones(self.ndevs_before)\n tdi.append(wr)\n tdi.append_ones(self.ndevs_after)\n self.driver.scan_dr(tdi)", "def shift_by(self, xshift):\n return Waveform(self.xvec + xshift, self.yvec, self.xtol, order=self.order, ext=self.ext)", "def __rshift__(self, other):\n self.connect(other)", "def shift(self, da, dim, shift):\n # TODO: generalize rolling function, allow custom shifts, handle\n # boundary conditions, etc.\n return da.roll(**{dim: shift})", "def sleep(self,length=10):\n self.rs485.write_command('#{}bs {}'.format(self.address,length))", "def shift(self, el):\n self.register.pop(0)\n self.register.append(el)", "def write(self, segment, result):\n pass", "def shift(self, ds):\n p = np.array(self.GetPosition())\n\n self.SetPosition(p + ds)\n return self", "def shift(options):\n signal = audio.read(options.ipath)\n result = op.shift(signal.data, int(options.factor))\n audio.write(options.opath, result, signal.rate, sampwidth=1)\n if options.plot:\n plotter.plot(**{'Input: '+options.ipath: signal.data,\n 'Output: '+options.opath: result})", "def address_shift(self, shift):\n return self.player_address + shift", "def __rshift__(self, next: 'IO[TResult]') -> 'IO[TResult]':\n return self.bind(lambda _: next)", "def _shift(self, s):\n start_pos = self._relative_head_pos()\n l = 1 + 2 * self.shift_length\n shift = int(s * l - 0.000000001) - int(l / 2)\n for s in range(abs(shift)):\n if shift > 0:\n if self.head_pos == len(self.memory) - 1 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((self.memory, np.zeros((1, self.memory_unit_size))), 0)\n self.head_pos += 1\n else:\n self.head_pos = (self.head_pos + 1) % self.max_memory\n else:\n if self.head_pos == 0 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((np.zeros((1, self.memory_unit_size)), self.memory), 0)\n self.left_expands += 1\n else:\n self.head_pos = (self.head_pos - 1) % self.max_memory\n if self.history is not None:\n self.history[\"loc\"][-1].append((start_pos, 0.1))\n return 
np.sign(shift)", "def shift(self, obj):\n if self.begin == self.end == None:\n self.begin = self.end = DoubleLinkedListNode(obj, None, None)\n\n elif self.begin == self.end != None:\n self.end = DoubleLinkedListNode(obj, None, self.begin)\n self.begin = DoubleLinkedListNode(self.begin.value, self.end, None)\n\n else:\n x = self.end\n self.end = DoubleLinkedListNode(obj, None, x)\n x.next = self.end", "def add_segment(self):\n last_seg = c.coords(self.segments[0].instance)\n x = last_seg[2] - SEG_SIZE\n y = last_seg[3] - SEG_SIZE\n self.segments.insert(0, Segment(x, y))", "def shift(self, delay):\n self.go_to(self.time + delay)", "def write(self, data):\n self.buffer.write(data)\n self.offset += len(data)", "def add_segment(self, segment):\n self.segments.append(segment)", "def emit(self, record):\n self.buffer.append(record)\n while len(self.buffer) != 0:\n nextRecord = self.buffer.popleft()\n\n super().emit(nextRecord)\n\n if self.sock is None: # If we failed to send the record\n self.buffer.appendleft(nextRecord)\n break", "def shift(self, obj):\n new_node = SingleLinkedListNode(obj, None)\n if self.begin is None:\n self.begin = new_node\n self.end = self.begin\n else:\n new_node.next = self.begin\n self.begin = new_node", "def test_shift_sets_new_tail_is_previous(new_dll):\n new_dll.shift()\n assert new_dll.tail.value == 4", "def shift_right(self):\n self.pointer = (self.pointer + 1) % len(self.data)", "def add_segment(self, segment):\n assert segment is None or isinstance(segment, Segment)\n\n self.segment = segment\n if segment is None:\n return\n\n ## reset Strand description with the description derived\n ## from the new Segment\n try:\n frag1 = segment[0]\n frag2 = segment[-1]\n except IndexError:\n return\n\n self.chain_id1 = frag1.chain_id\n self.fragment_id1 = frag1.fragment_id\n self.res_name1 = frag1.res_name\n\n self.chain_id2 = frag2.chain_id\n self.fragment_id2 = frag2.fragment_id\n self.res_name2 = frag2.res_name", "def rshift(self, count):\n self._c = (bitarray('0') * count) + self._c[:-count]" ]
[ "0.59277046", "0.5857198", "0.5500183", "0.5026484", "0.50250506", "0.49946907", "0.49946907", "0.49397922", "0.49365884", "0.49150783", "0.4914794", "0.49120414", "0.49022216", "0.48939252", "0.48647368", "0.48530436", "0.48466676", "0.48441014", "0.48221606", "0.4820676", "0.48197627", "0.48048556", "0.479151", "0.4780857", "0.47696745", "0.47575858", "0.4746213", "0.47427177", "0.47293454", "0.47247005" ]
0.7589725
0
Build wire segments from the internal wiring diagram.
def build_wires(self) -> List[Segment]:\n    segments = [Segment(self.ORIGIN, self.ORIGIN.move_to(self._diagram[0]))]\n    for step in self._diagram[1:]:\n        segments.append(segments[-1].wire_to(step))\n\n    return segments
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def construct_segments(self):\n for strand in self.strand_list:\n strand.construct_segment()", "def generate(self, diagram):", "def build_microtrips(self, segment_lenght):\n seg_count = 0\n for data in self.clean_data:\n seg_count = data.segment_all(segment_lenght, seg_count)\n self.microtrip_data.append(MicrotripData(data))", "def build(self):\n code = _upc.EDGE[:]\n\n for _, number in enumerate(self.code[0:6]):\n code += _upc.CODES['L'][int(number)]\n\n code += _upc.MIDDLE\n\n for number in self.code[6:]:\n code += _upc.CODES['R'][int(number)]\n\n code += _upc.EDGE\n\n return [code]", "def _gen_segments(message):\n max_size = constants.UDP_SAFE_SEGMENT_SIZE\n count = (len(message) + max_size - 1) // max_size\n segments = (\n (count - i - 1, message[i * max_size: (i + 1) * max_size])\n for i in range(count)\n )\n return segments", "def _build_legs(self):\n if self._primary_mode == 'transit':\n for transit_leg in self._best_trip.get_transit_legs():\n self._legs.append(transit_leg.get_directions())\n else:\n self._legs.append(self._best_trip.get_directions())", "def _build_topology(self):\n\t\t# childSection.connect(parentSection, [parentX], [childEnd])\n\t\tfor i in range(self._axonNodes-1):\n\t\t\tself.node[i].connect(self.mysa[2*i],0,1)\n\t\t\tself.mysa[2*i].connect(self.flut[2*i],0,1)\n\t\t\tself.flut[2*i].connect(self.stin[6*i],0,1)\n\t\t\tself.stin[6*i].connect(self.stin[6*i+1],0,1)\n\t\t\tself.stin[6*i+1].connect(self.stin[6*i+2],0,1)\n\t\t\tself.stin[6*i+2].connect(self.stin[6*i+3],0,1)\n\t\t\tself.stin[6*i+3].connect(self.stin[6*i+4],0,1)\n\t\t\tself.stin[6*i+4].connect(self.stin[6*i+5],0,1)\n\t\t\tself.stin[6*i+5].connect(self.flut[2*i+1],0,1)\n\t\t\tself.flut[2*i+1].connect(self.mysa[2*i+1],0,1)\n\t\t\tself.mysa[2*i+1].connect(self.node[i+1],0,1)", "def _convert_to_multi_segment(self):\n\n self.header['nb_segment'] = [self.info['n_episodes']]\n\n # drop repeated signal headers\n self.header['signal_channels'] = \\\n self.header['signal_channels'].reshape(\n self.info['n_episodes'], -1)[0]\n\n # reshape signal memmap list\n new_sig_memmaps = []\n n_channels = len(self.header['signal_channels'])\n sig_memmaps = self._raw_signals[0]\n for first_index in np.arange(0, len(sig_memmaps), n_channels):\n new_sig_memmaps.append(\n sig_memmaps[first_index:first_index + n_channels])\n self._raw_signals = new_sig_memmaps\n\n self.logger.debug('New number of segments: {}'.format(\n self.info['n_episodes']))\n\n return", "def _build_directions(self):\n d = {'start': self.get_start(), 'end': self.get_end(), 'duration': self.get_duration(),\n 'mode': self.get_primary_mode(), 'price_range': self.get_price_range(), 'legs': self.get_legs(),\n 'start_location': self.get_start_location(), 'end_location': self.get_end_location()}\n self.set_directions(d)", "def construct_segment(self):\n segment = Segment(\n model_id = self.model_id,\n chain_id = self.chain_id)\n\n segment.chain = self.chain\n segment.model = self.model\n\n return segment", "def segment(data):", "def create_from_segments(self, segment, origin=0):\r\n n = origin\r\n if segment[origin]['T'] != 'soma': # if it's a soma, only one compartment\r\n while (len(segment[n]['children']) == 1) and (segment[n]['T'] != 'soma'): # Go to the end of the branch\r\n n += 1\r\n # End of branch\r\n branch = segment[origin:n + 1]\r\n # Set attributes\r\n self.diameter, self.length, self.area, self.x, self.y, self.z = \\\r\n zip(*[(seg['diameter'], seg['length'], seg['area'], seg['x'], seg['y'], seg['z']) for seg in branch])\r\n self.diameter, 
self.length, self.area, self.x, self.y, self.z = array(self.diameter), array(self.length), \\\r\n array(self.area), array(self.x), array(self.y), array(self.z)\r\n self.type = segment[n]['T'] # normally same type for all compartments in the branch\r\n # Create children (list)\r\n self.children = [Morphology().create_from_segments(segment, origin=c) for c in segment[n]['children']]\r\n # Create dictionary of names (enumerates children from number 1)\r\n for i, child in enumerate(self.children):\r\n self._namedkid[str(i + 1)] = child\r\n # Name the child if possible\r\n if child.type in ['soma', 'axon', 'dendrite']:\r\n if child.type in self._namedkid:\r\n self._namedkid[child.type] = None # two children with the same name: erase (see next block)\r\n else:\r\n self._namedkid[child.type] = child\r\n # Erase useless names\r\n for k in self._namedkid.keys():\r\n if self._namedkid[k] is None:\r\n del self._namedkid[k]\r\n # If two kids, name them L (left) and R (right)\r\n if len(self.children) == 2:\r\n self._namedkid['L'] = self._namedkid['1']\r\n self._namedkid['R'] = self._namedkid['2']\r\n return self", "def _generate_subpaths(self):\n\n scale = self.SCALE\n\n for point in self._points:\n x_base = point[0] * scale + self.border * scale + self.line_size\n y_base = point[1] * scale + self.border * scale + self.line_size\n\n yield 'M {x0} {y0} L {x0} {y1} L {x1} {y1} L {x1} {y0} z'.format(\n x0=x_base,\n y0=y_base,\n x1=x_base + scale,\n y1=y_base + scale\n )", "def construct_segment(self):\n segment = Segment(\n model_id = self.model_id,\n chain_id = self.chain_id)\n\n segment.chain = self\n segment.model = self.model\n\n return segment", "def convert_segments(segments):\n polygons = []\n interiors = []\n linestrings = []\n for segment in segments:\n ls = LineString(segment)\n if segment[0][0] == segment[-1][0] and segment[0][1] == segment[-1][1]:\n lr = LinearRing(ls)\n if not lr.is_ccw:\n polygons.append(Polygon(segment))\n else:\n interiors.append(lr)\n continue\n linestrings.append(ls)\n\n return polygons, interiors, linestrings", "def get_diagram(self):\n self_nodes=self.nodes.all()\n self_arrows=self.arrows.all()\n \n \n if len(self_nodes)==0:\n return False\n \n nodes = [n.get_icon_obj() for n in self_nodes]\n node_liens = [n.liens.all() for n in self_nodes]\n \n pairs = []\n for n,n_liens in zip(nodes,node_liens):\n if len(n_liens)==0:\n liens = Lien.objects.filter(cause__id=n.target_id).all()\n liens = [l.consequence.id for l in liens]\n temp = [(n,target) for target in nodes if target.target_id in liens]\n pairs.extend(temp)\n else:\n ids=set([(i.cause.id,i.consequence.id) for i in n_liens])\n for n in nodes:\n pairs.extend([(n,i) for i in nodes if i is not n and \n (n.target_id,i.target_id) in ids])\n ids = set([(i.cause.id,i.consequence.id) for i in self_arrows])\n pairs = [p for p in pairs if (p[0].target_id,p[1].target_id) not in ids]\n \n lines=[]\n arrows=[]\n for obj in self_arrows:\n \n n0=[i for i in nodes if i.node_id==obj.cause.id]\n n1=[i for i in nodes if i.node_id==obj.consequence.id]\n if len(n0)!=1 or len(n1)!=1:\n continue\n n0=n0[0]\n n1=n1[0]\n \n pt=[(obj.X0, obj.Y0), (obj.X1,obj.Y1)]\n pt=[np.array(i) for i in pt if None not in i]\n if len(pt)==0:\n pairs.append((n0,n1))\n continue\n pairs = [p for p in pairs if (p[0].node_id,p[1].node_id)!=(n0.node_id,n1.node_id)]\n vect = pt[0]-np.array(n0.pos)\n first_pt = np.array(n0.pos)+vect*n0.size/np.sqrt(sum(vect*vect))\n vect = np.array(n1.pos) - pt[-1]\n last_pt = 
np.array(n1.pos)-vect*n1.size/np.sqrt(sum(vect*vect))\n pt=[first_pt,*pt,last_pt]\n \n lines.extend([((*i,*j),n0.color) for i,j in zip(pt[:-1],pt[1:])])\n arrows.append(((*pt[-2],*pt[-1]),n0.color))\n \n \n margin=10\n line_width=2\n \n diagram=DiagramObj(self.id,nodes,pairs,margin,\n self.width,self.height,line_width)\n diagram.add_arrows(arrows,lines)\n print(diagram.lines)\n return diagram", "def create_lines(self) -> None:\n res = []\n for connection in self.connections:\n start_component = self.components[connection.start_entity]\n end_component = self.components[connection.end_entity]\n start_pin_location = (\n start_component.location\n + start_component.pin_locations[connection.start_pin]\n )\n end_pin_location = (\n end_component.location + end_component.pin_locations[connection.end_pin]\n )\n\n x_midpoint = (start_pin_location.x + end_pin_location.x) / 2\n bend_start = Point(x_midpoint, start_pin_location.y)\n bend_end = Point(x_midpoint, end_pin_location.y)\n bends = [bend_start, bend_end]\n res.append(Line(connection, start_pin_location, *bends, end_pin_location))\n\n self.lines = res", "def _build_graph(self):\n pass", "def _writeSegmentsRealization(self, writeTo):\n pivotID = self._templateROM.pivotParameterID\n pivot = self._indexValues[pivotID]\n # realization to add eventually\n rlz = {}\n segmentNames = range(len(self._divisionInfo['delimiters']))\n # pivot for all this stuff is the segment number\n rlz['segment_number'] = np.asarray(segmentNames)\n # start indices\n varName = 'seg_index_start'\n writeTo.addVariable(varName, np.array([]), classify='meta', indices=['segment_number'])\n rlz[varName] = np.asarray(list(d[0] for d in self._divisionInfo['delimiters']))\n # end indices\n varName = 'seg_index_end'\n writeTo.addVariable(varName, np.array([]), classify='meta', indices=['segment_number'])\n rlz[varName] = np.asarray(list(d[-1] for d in self._divisionInfo['delimiters']))\n # pivot start values\n varName = 'seg_{}_start'.format(self._templateROM.pivotParameterID)\n writeTo.addVariable(varName, np.array([]), classify='meta', indices=['segment_number'])\n rlz[varName] = np.asarray(list(pivot[d[0]] for d in self._divisionInfo['delimiters']))\n # pivot end values\n varName = 'seg_{}_end'.format(self._templateROM.pivotParameterID)\n writeTo.addVariable(varName, np.array([]), classify='meta', indices=['segment_number'])\n rlz[varName] = np.asarray(list(pivot[d[-1]] for d in self._divisionInfo['delimiters']))\n return rlz", "def make_segments(self, set_depth=True, input_rec_track=None,\r\n overwrite_rec_seg=False): \r\n grd = self.grd\r\n if input_rec_track is None:\r\n rec_track = self.rec_track\r\n else:\r\n rec_track = input_rec_track\r\n ndetects = len(rec_track)\r\n nsegments = ndetects - 1\r\n self.nsegments = nsegments\r\n if nsegments < 1:\r\n return\r\n x1 = np.nan*np.ones(nsegments, np.float64)\r\n y1 = np.nan*np.ones(nsegments, np.float64)\r\n z1 = np.nan*np.ones(nsegments, np.float64)\r\n x2 = np.nan*np.ones(nsegments, np.float64)\r\n y2 = np.nan*np.ones(nsegments, np.float64)\r\n z2 = np.nan*np.ones(nsegments, np.float64)\r\n xm = np.nan*np.ones(nsegments, np.float64)\r\n ym = np.nan*np.ones(nsegments, np.float64)\r\n zm = np.nan*np.ones(nsegments, np.float64)\r\n t1 = np.nan*np.ones(nsegments, np.float64)\r\n t2 = np.nan*np.ones(nsegments, np.float64)\r\n tm = np.nan*np.ones(nsegments, np.float64)\r\n dn1 = np.nan*np.ones(nsegments, np.float64)\r\n dn2 = np.nan*np.ones(nsegments, np.float64)\r\n dnm = np.nan*np.ones(nsegments, np.float64)\r\n dt = 
np.nan*np.ones(nsegments, np.float64)\r\n dist = np.nan*np.ones(nsegments, np.float64)\r\n speed = np.nan*np.ones(nsegments, np.float64)\r\n u = np.nan*np.ones(nsegments, np.float64)\r\n v = np.nan*np.ones(nsegments, np.float64)\r\n head = np.nan*np.ones(nsegments, np.float64)\r\n idetect = np.nan*np.ones(nsegments, np.int32)\r\n tag_id = []\r\n for nd in range(1,ndetects):\r\n tag_id.append(self.ID)\r\n ns = nd-1\r\n x1[ns] = rec_track.X[nd-1]\r\n y1[ns] = rec_track.Y[nd-1]\r\n z1[ns] = rec_track.Z[nd-1]\r\n x2[ns] = rec_track.X[nd]\r\n y2[ns] = rec_track.Y[nd]\r\n z2[ns] = rec_track.Z[nd]\r\n t1[ns] = rec_track.Sec[nd-1]\r\n t2[ns] = rec_track.Sec[nd]\r\n dn1[ns] = rec_track.dnums[nd-1]\r\n dn2[ns] = rec_track.dnums[nd]\r\n dx = x2[ns] - x1[ns]\r\n dy = y2[ns] - y1[ns]\r\n xm[ns] = 0.5*(x1[ns]+x2[ns])\r\n ym[ns] = 0.5*(y1[ns]+y2[ns])\r\n zm[ns] = 0.5*(z1[ns]+z2[ns])\r\n tm[ns] = 0.5*(t1[ns]+t2[ns])\r\n dnm[ns] = 0.5*(dn1[ns]+dn2[ns])\r\n dt[ns] = t2[ns] - t1[ns]\r\n dist[ns] = np.sqrt(dx*dx + dy*dy)\r\n if dt[ns] > 0.0:\r\n speed[ns] = dist[ns]/dt[ns] \r\n u[ns] = dx/dt[ns]\r\n v[ns] = dy/dt[ns]\r\n head[ns] = math.atan2(v[ns],u[ns])\r\n df_seg = pd.DataFrame({'id':np.asarray(tag_id), \r\n 'x1':x1, 'x2':x2, 'xm':xm,\r\n 'y1':y1, 'y2':y2, 'ym':ym,\r\n 'z1':z1, 'z2':z2, 'zm':zm,\r\n 't1':t1, 't2':t2, 'tm':tm,\r\n 'dn1':dn1, 'dn2':dn2, 'dnm':dnm,\r\n 'dt':dt, 'dist':dist, 'head':head,\r\n 'speed':speed, 'u':u, 'v':v})\r\n if set_depth:\r\n depth = np.nan*np.ones(nsegments, np.float64)\r\n for ns in range(nsegments):\r\n xy = [xm[ns], ym[ns]]\r\n i = grd.select_cells_nearest(xy, inside=True)\r\n if i is None: i=-1\r\n idetect[ns] = i\r\n if i >= 0:\r\n depth[ns] = grd.cells['depth'][i]\r\n df_seg = df_seg.assign(depth = depth)\r\n df_seg = df_seg.assign(i = idetect)\r\n\r\n rec_seg = df_seg.to_records()\r\n if (input_rec_track is None) or overwrite_rec_seg:\r\n self.df_seg = copy.deepcopy(df_seg)\r\n self.rec_seg = copy.deepcopy(rec_seg)\r\n return\r\n else:\r\n return rec_seg", "def make_instructions(self):\n #de, aux, vers = self.rods\n de, aux, vers = 0, 1, 2\n n = self.num_rings\n\n self.recur(n, de, aux, vers)\n\n ### Add dummy tuple at end so I can look one move ahead on states\n self.instructions.append((0, 0, 0))", "def parse_segments(self):\n segs = self.unixtext.split(\"$$\")\n for seg in segs:\n self.segments.append(TextProductSegment(seg, self))", "def extract_diagram(self):\n nodes = []\n edges = []\n \n for clump in self.clumps:\n new_nodes, new_edges = clump.get_diagram_representation()\n nodes.extend(new_nodes)\n edges.extend(new_edges)\n #nodes.append(backend.JunctionNode(clump))\n # TODO: move to Tunnel.get_diagram_representation()\n for tunnel in self.tunnels:\n# print tunnel\n edges.append(TunnelEdge(tunnel))\n return nodes, edges", "def make_segments(x, y):\n\n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n return segments", "def make_segments(x, y):\n\n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n return segments", "def make_segments(x, y):\n\n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n return segments", "def make_segments(x, y):\n\n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n return segments", "def parse_and_construct_graphic_layer(ds):\r\n graphic_layers = list()\r\n for item in ds.SegmentSequence:\r\n layer 
= {\r\n \"GraphicLayer\": str(item.SegmentDescription).upper(),\r\n \"GraphicLayerOrder\": item.SegmentNumber,\r\n \"GraphicLayerRecommendedDisplayCIELabValue\": [49512, 38656, 52736]\r\n }\r\n graphic_layers.append(layer)\r\n return graphic_layers", "def build_graph(self):\n pass", "def _parse_diagram(wiring_diagram: str) -> List[Move]:\n return [\n Move(direction=shift[0], dist=int(shift[1:])) for shift in wiring_diagram.split(\",\")\n ]" ]
[ "0.627155", "0.5714515", "0.5531439", "0.53391623", "0.5144474", "0.5128602", "0.5128105", "0.5107117", "0.5049788", "0.50365037", "0.50336444", "0.50184613", "0.500898", "0.49955854", "0.4993949", "0.49708354", "0.4966822", "0.49333", "0.49315196", "0.49184826", "0.4903525", "0.4903436", "0.48736462", "0.4855879", "0.4855879", "0.4855879", "0.4855879", "0.48259056", "0.48252854", "0.48071116" ]
0.7345104
0
Find the closest intersection to the origin, by Manhattan distance, of two Wires.
def closest_intersect_manhattan(self, other: Wire) -> Tuple[Coordinate, int]:\n    intersection = sorted(self.intersect(other), key=lambda x: self.ORIGIN.dist(x.location))[0]\n\n    return intersection, self.ORIGIN.dist(intersection.location)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findIntersect(wires):\n allSets = list(map(lambda w: coordsFor(w), wires))\n baseSet = allSets[0]\n for s in allSets[1:]:\n baseSet.intersection_update(s)\n central = (0, 0)\n distances = list(map(lambda c: manhattan(central, c), baseSet))\n return min(distances)", "def get_closest_intersection(wire1, wire2):\n pass", "def distance(wires) -> int:\n\n wire_0_pos = get_positions(wires[0])\n wire_1_pos = get_positions(wires[1])\n\n # find intersections\n intersections = list(set(wire_0_pos).intersection(set(wire_1_pos)))\n # ignore the 0,0 intersect\n intersections.remove((0, 0))\n m_distances = [manhattan_distance(x, y) for x, y in intersections]\n\n\n return min(m_distances)", "def solve_part_one(wire_one_map, wire_two_map):\n return int(min([manhattan_distance(x, y) for (x, y) in find_intersection(wire_one_map, wire_two_map)]))", "def find_closest_intersections(wire_data):\n\n # Find the intersection of the two lists\n intersections = find_intersections(wire_data)\n\n # For each intersection measure distance from the centre\n dists = [abs(point[0]) + abs(point[1]) for point in intersections]\n\n return min(dists)", "def heuristic(self, a, b):\n return math.fabs(a[0] - b[0]) + math.fabs(a[1] - b[1])", "def _nearest(arrlist_1, arrlist_2):\n tree = KDTree(arrlist_1);\n pts = tree.query(arrlist_2)\n\n return tree.data[pts[1][pts[0].argmin()]]", "def find_closest_atom(coords1, coords2):\n\n coords1 = np.array(coords1)\n coords2 = np.array(coords2)\n diff = coords2[:, np.newaxis] - coords1[np.newaxis, :]\n dist = np.einsum('ijk->ij', diff**2)**0.5\n index = np.argmin(dist)\n return index", "def manhattan_distance(x, y):\n return sum(abs(a - b) for a, b in zip(x, y))", "def get_manhattan_distance(coord_a, coord_b):\n return abs(coord_a.x - coord_b.x) + abs(coord_a.y - coord_b.y)", "def closest_distance(node_a, node_b):\n min_distance = 999999\n for loc_a in node_a.locations:\n for loc_b in node_b.locations:\n distance = abs(loc_a - loc_b)\n if distance < min_distance:\n min_distance = distance\n return min_distance", "def intersection(a, b):\n x = max(a[0],b[0])\n y = max(a[1],b[1])\n w = min(a[2],b[2]) - x\n h = min(a[3],b[3]) - y\n \n if h<0 or w<0 :\n return 0\n \n return h*w", "def get_distance_of_closest_intersections(commands1, commands2):\n path1 = get_one_path(commands1)\n path2 = get_one_path(commands2)\n intersections = set(path1).intersection(set(path2))\n return min(map(lambda x: np.abs(x[0])+np.abs(x[1]), intersections))", "def shortest_manhattan_distance(coordinates):\n current_minimum = sys.maxsize\n\n for x, y in coordinates:\n if abs(x) + abs(y) < current_minimum:\n current_minimum = abs(x) + abs(y)\n\n return current_minimum", "def manhattan_distance(x, y):\n return abs(x) + abs(y)", "def manhattan_distance(a: ArrayLike, b: ArrayLike) -> NDArrayFloat:\n\n return as_float(\n np.sum(np.abs(as_float_array(a) - as_float_array(b)), axis=-1)\n )", "def manhattanDistance(loc1, loc2):\n # BEGIN_YOUR_CODE (our solution is 1 line of code, but don't worry if you deviate from this)\n return(sum(tuple(abs(i-j) for i,j in zip(loc1,loc2))))\n # END_YOUR_CODE", "def find_closest(a, b):\n a = np.atleast_1d(np.array(a))\n b = np.atleast_1d(np.array(b))\n out = [np.argmin(abs(b - a1)) for a1 in a]\n return out", "def manhattan_distance(origin, destination):\n return abs(destination.row - origin.row) + \\\n abs(destination.column - origin.column)", "def manhattan_distance_between(start, destination):\n return abs(destination.x - start.x) + abs(destination.y - start.y)", "def 
shortest_distance(puzzle_input: List[str], satellite_name_a: str, satellite_name_b: str) -> Tuple[int, str]:\n orbit_tree = make_tree(puzzle_input)\n\n distances_satellite_a = distance_to_objects(orbit_tree, satellite_name_a)\n\n distances_satellite_b = distance_to_objects(orbit_tree, satellite_name_b)\n\n # & gives the intersection between the sets of keys, leaving only the objects they both orbit directly/indirectly\n objects_in_common = set(distances_satellite_a.keys()) & set(distances_satellite_b.keys())\n distances = [\n # Sum of distance from satellite a, b to each object, object name\n (distances_satellite_a[obj] + distances_satellite_b[obj], obj)\n for obj in objects_in_common\n ]\n\n min_distance, satellite_name = min(distances)\n return min_distance, satellite_name", "def manhattan_distance(self):\n x, y = self.start\n other_x, other_y = self.x, self.y\n print(abs(x - other_x) + abs(y - other_y))", "def closest_points(self, other):\n p0_other, p1_other = other.p0, other.p1\n\n # w = p1 - p0\n # v = p1_other - p0_other\n # s*w + p0 = t*v + p0_other\n\n w = self.p1 - self.p0\n v = p1_other - p0_other\n\n A = np.vstack((w,v)).T\n b = p0_other - self.p0\n\n #soln = np.linalg.solve(A, b)\n soln = np.linalg.pinv(A).dot(b)\n s, t = soln[0], -soln[1]\n\n return s*w + self.p0, t*v + p0_other", "def closest_intersect_steps(self, other: Wire) -> Tuple[Intersection, int]:\n intersections = self.intersect(other)\n\n # For each intersection, iterate along each wire's path until the intersection is\n # encountered, keeping track of the number of steps taken\n distances = []\n for intersection in intersections:\n total_steps = 0\n for wire in (self, other):\n for segment in wire.wire_segments:\n try:\n total_steps += segment.steps.index(intersection.location)\n break\n except ValueError:\n # The intersection coordinate isn't in our segment\n total_steps += len(segment.steps) - 1\n\n distances.append((intersection, total_steps))\n\n return sorted(distances, key=lambda x: x[1])[0]", "def closest_distance(self, time, other_object, other_time):\n ti = np.where(self.times == time)[0][0]\n oti = np.where(other_object.times == other_time)[0][0]\n xs = self.x[ti].ravel()[self.masks[ti].ravel() == 1]\n xs = xs.reshape(xs.size, 1)\n ys = self.y[ti].ravel()[self.masks[ti].ravel() == 1]\n ys = ys.reshape(ys.size, 1)\n o_xs = other_object.x[oti].ravel()[other_object.masks[oti].ravel() == 1]\n o_xs = o_xs.reshape(1, o_xs.size)\n o_ys = other_object.y[oti].ravel()[other_object.masks[oti].ravel() == 1]\n o_ys = o_ys.reshape(1, o_ys.size)\n distances = (xs - o_xs) ** 2 + (ys - o_ys) ** 2\n return np.sqrt(distances.min())", "def _get_distance(a, b):\n return np.sqrt(np.sum((a - b) ** 2))", "def intersect(self, plane, epsilon=0.00001):\r\n den = np.dot(self.direction, plane.normal)\r\n if math.fabs(den) < epsilon:\r\n return None\r\n\r\n result = (-plane.distance - np.dot(plane.normal, self.origin)) / den\r\n\r\n if result < 0.0:\r\n if result < -epsilon:\r\n return None\r\n result = 0.0\r\n return result", "def get_manhattan_dist(row1, col1, row2, col2):\n distHoriz = abs(row1 - row2)\n distVert = abs(col1 - col2)\n dist = distHoriz + distVert\n return dist", "def calc_distance(first: Waypoint, second: Waypoint) -> int:\n return int(distance.vincenty(first.coords(), second.coords()).m)", "def closest_other(state):\n locations = others_locations(state)\n distances_ = distances(my_location(state), list(locations.values()))\n dist_dict = {key: dist for key, dist in zip(locations, distances_)}\n target = 
util.argmin_dict(dist_dict)\n return target" ]
[ "0.73668647", "0.71624225", "0.6995059", "0.688957", "0.6259799", "0.6222615", "0.6161806", "0.6148054", "0.5992006", "0.5981919", "0.5977006", "0.59560627", "0.59306633", "0.59270483", "0.59255", "0.5852488", "0.58326423", "0.5826113", "0.58249897", "0.5812551", "0.5808463", "0.5791225", "0.5779713", "0.5770785", "0.5753369", "0.5734163", "0.57179785", "0.5710641", "0.5704868", "0.57021767" ]
0.78951204
0
Find the closest intersection to the origin, by step distance, of two Wires.
def closest_intersect_steps(self, other: Wire) -> Tuple[Intersection, int]:\n    intersections = self.intersect(other)\n\n    # For each intersection, iterate along each wire's path until the intersection is\n    # encountered, keeping track of the number of steps taken\n    distances = []\n    for intersection in intersections:\n        total_steps = 0\n        for wire in (self, other):\n            for segment in wire.wire_segments:\n                try:\n                    total_steps += segment.steps.index(intersection.location)\n                    break\n                except ValueError:\n                    # The intersection coordinate isn't in our segment\n                    total_steps += len(segment.steps) - 1\n\n        distances.append((intersection, total_steps))\n\n    return sorted(distances, key=lambda x: x[1])[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_closest_intersection(wire1, wire2):\n pass", "def findIntersect(wires):\n allSets = list(map(lambda w: coordsFor(w), wires))\n baseSet = allSets[0]\n for s in allSets[1:]:\n baseSet.intersection_update(s)\n central = (0, 0)\n distances = list(map(lambda c: manhattan(central, c), baseSet))\n return min(distances)", "def closest_intersect_manhattan(self, other: Wire) -> Tuple[Coordinate, int]:\n intersection = sorted(self.intersect(other), key=lambda x: self.ORIGIN.dist(x.location))[0]\n\n return intersection, self.ORIGIN.dist(intersection.location)", "def find_closest_intersections(wire_data):\n\n # Find the intersection of the two lists\n intersections = find_intersections(wire_data)\n\n # For each intersection measure distance from the centre\n dists = [abs(point[0]) + abs(point[1]) for point in intersections]\n\n return min(dists)", "def get_intersection(self, l, max_y=None):\n\n # Get the points\n i, j = self.breakpoint\n\n # Initialize the resulting point\n result = Coordinate()\n p: Coordinate = i\n\n # First we replace some stuff to make it easier\n a = i.xd\n b = i.yd\n c = j.xd\n d = j.yd\n u = 2 * (b - l)\n v = 2 * (d - l)\n\n # Handle the case where the two points have the same y-coordinate (breakpoint is in the middle)\n if i.yd == j.yd:\n result.xd = (i.xd + j.xd) / 2\n\n if j.xd < i.xd:\n result.yd = max_y or float('inf')\n return result\n\n # Handle cases where one point's y-coordinate is the same as the sweep line\n elif i.yd == l:\n result.xd = i.xd\n p = j\n elif j.yd == l:\n result.xd = j.xd\n else:\n # We now need to solve for x\n # 1/u * (x**2 - 2*a*x + a**2 + b**2 - l**2) = 1/v * (x**2 - 2*c*x + c**2 + d**2 - l**2)\n # Then we let Wolfram alpha do the heavy work for us, and we put it here in the code :D\n x = -(Decimal.sqrt(\n v * (a ** 2 * u - 2 * a * c * u + b ** 2 * (u - v) + c ** 2 * u) + d ** 2 * u * (v - u) + l ** 2 * (\n u - v) ** 2) + a * v - c * u) / (u - v)\n result.xd = x\n\n # We have to re-evaluate this, since the point might have been changed\n a = p.xd\n b = p.yd\n x = result.xd\n u = 2 * (b - l)\n\n # Handle degenerate case where parabolas don't intersect\n if u == 0:\n result.yd = float(\"inf\")\n return result\n\n # And we put everything back in y\n result.yd = 1 / u * (x ** 2 - 2 * a * x + a ** 2 + b ** 2 - l ** 2)\n return result", "def get_overland(p1, p2, tolerance = 0.1, min_slope = 0.00001):\n\n L = get_distance(p1, p2)\n\n if L > tolerance: return L / 2., (p1[2] - p2[2]) / L / 100000\n else: return tolerance, min_slope", "def heuristic(self, a, b):\n return math.fabs(a[0] - b[0]) + math.fabs(a[1] - b[1])", "def get_steps_of_closest_intersections(commands1, commands2):\n\n path1 = get_one_path(commands1)\n path2 = get_one_path(commands2)\n intersections = set(path1).intersection(set(path2))\n # index is 0 based, therefore +2\n return min(map(lambda x: path1.index(x) + path2.index(x), intersections)) + 2", "def _get_closest(self, x, y, clients):\n target = min(\n clients,\n key=lambda c: math.hypot(c.x - x, c.y - y),\n default=self.clients.current_client,\n )\n return target", "def find_closest(a, b):\n a = np.atleast_1d(np.array(a))\n b = np.atleast_1d(np.array(b))\n out = [np.argmin(abs(b - a1)) for a1 in a]\n return out", "def closest(self, x, y):\n pts = np.column_stack([self.x, self.y])\n # Transform data coordinates to pixel coordinates.\n pts = self.ax.transData.transform(pts)\n diff = pts - [x, y]\n dist = np.hypot(*diff.T)\n min_index = np.argmin(dist)\n return min_index, dist[min_index]", "def intersection(a, b):\n x = 
max(a[0],b[0])\n y = max(a[1],b[1])\n w = min(a[2],b[2]) - x\n h = min(a[3],b[3]) - y\n \n if h<0 or w<0 :\n return 0\n \n return h*w", "def get_distance_of_closest_intersections(commands1, commands2):\n path1 = get_one_path(commands1)\n path2 = get_one_path(commands2)\n intersections = set(path1).intersection(set(path2))\n return min(map(lambda x: np.abs(x[0])+np.abs(x[1]), intersections))", "def intersect(self, plane, epsilon=0.00001):\r\n den = np.dot(self.direction, plane.normal)\r\n if math.fabs(den) < epsilon:\r\n return None\r\n\r\n result = (-plane.distance - np.dot(plane.normal, self.origin)) / den\r\n\r\n if result < 0.0:\r\n if result < -epsilon:\r\n return None\r\n result = 0.0\r\n return result", "def steps(wires) -> int:\n wire_0_pos = get_positions(wires[0])\n wire_1_pos = get_positions(wires[1])\n\n # find intersections\n intersections = list(set(wire_0_pos).intersection(set(wire_1_pos)))\n intersections.remove((0, 0))\n steps = [wire_0_pos.index(intersection) + wire_1_pos.index(intersection) for intersection in intersections]\n\n return min(steps)", "def closest_points(self, other):\n p0_other, p1_other = other.p0, other.p1\n\n # w = p1 - p0\n # v = p1_other - p0_other\n # s*w + p0 = t*v + p0_other\n\n w = self.p1 - self.p0\n v = p1_other - p0_other\n\n A = np.vstack((w,v)).T\n b = p0_other - self.p0\n\n #soln = np.linalg.solve(A, b)\n soln = np.linalg.pinv(A).dot(b)\n s, t = soln[0], -soln[1]\n\n return s*w + self.p0, t*v + p0_other", "def minimalDistance(a1, a2, b1, b2):\n adir = a2 - a1\n bdir = b2 - b1\n amid = a1 + 0.5 * adir\n s = b1 - amid\n A = np.dot(bdir, bdir)\n B_2 = np.dot(bdir, s)\n lambda_beta = - B_2 / A\n bOpt = lambda_beta * bdir + b1\n s = a1 - bOpt\n A = np.dot(adir, adir)\n B_2 = np.dot(adir, s)\n lambda_alpha = - B_2 / A\n aOpt = lambda_alpha * adir + a1\n Delta = bOpt - aOpt\n return np.sqrt(np.dot(Delta, Delta))", "def find_closest_atom(coords1, coords2):\n\n coords1 = np.array(coords1)\n coords2 = np.array(coords2)\n diff = coords2[:, np.newaxis] - coords1[np.newaxis, :]\n dist = np.einsum('ijk->ij', diff**2)**0.5\n index = np.argmin(dist)\n return index", "def distance(wires) -> int:\n\n wire_0_pos = get_positions(wires[0])\n wire_1_pos = get_positions(wires[1])\n\n # find intersections\n intersections = list(set(wire_0_pos).intersection(set(wire_1_pos)))\n # ignore the 0,0 intersect\n intersections.remove((0, 0))\n m_distances = [manhattan_distance(x, y) for x, y in intersections]\n\n\n return min(m_distances)", "def _nearest(arrlist_1, arrlist_2):\n tree = KDTree(arrlist_1);\n pts = tree.query(arrlist_2)\n\n return tree.data[pts[1][pts[0].argmin()]]", "def getPointAwayFrom(startPoint, direction, distance):\n x = vectorMultiply(direction, distance)\n return vectorAdd(startPoint, x)", "def find_intersection(center0, direction0, center1, direction1):\n # c0 + d0 t = c1 + d1 s\n # (-d0) t + (d1) s = c0 - c1\n # [-d0, d1] [t,s]^T = delta\n A = np.array([-direction0, direction1]).T\n delta = center0 - center1\n # Unpack M = A^T * A:\n # [[a, b],\n # [c, d]]\n (a, b), (c, d) = A.T.dot(A)\n # Inverse of M:\n # 1/ det(M) [[ d, -b],\n # [-c, a]]\n M_inv = np.array([[d, -b], [-c, a]]) / (a * d - b * c)\n t, s = M_inv.dot(A.T.dot(delta))\n return t, s", "def get_intersection_point(l1, l2):\n m, b = l1\n n, c = l2\n # Find when mx + b = nx + c\n # mx - nx = c - b\n # And...\n x = (c-b) / (m-n)\n # Then plug back in\n y = m*x + b\n return (x, y)", "def closest_to(self, a, b):\n diff_a = abs(a.ts - self.ts)\n diff_b = abs(b.ts - self.ts)\n if diff_a < diff_b 
and diff_a < TIME_THRESHOLD:\n return a\n elif diff_b < TIME_THRESHOLD:\n return b\n return None", "def intersection(self, other):\n log.info('self: '+str(self)+' other: '+str(other))\n if self == other:\n # Used to be return True, that is definitely not right (expects Coordinate)\n # Do we want start or end ? Does it matter? Lines are the same, everything is\n # an intersection.\n return self.start\n # If any of the start/end points match, return that point.\n if self.end==other.start or self.end == other.end:\n return self.end \n if self.start==other.start or self.start == other.end: \n return self.start\n\n # Line equation: y = mx + b\n # m = (y2-y1)/(x2-x1)\n # B_self = y - M_self*x\n # Pick any x/y on the line - try end point\n # B_self = self.end.lat - M_self*self.end.lon\n # B_other = other.end.lat - M_self*self.end.lon\n from pyresample.spherical_geometry import Coordinate\n\n selfendlon = self.end.lon\n selfstartlon = self.start.lon\n otherendlon = other.end.lon\n otherstartlon = other.start.lon\n # Not sure if this is necessary, or good...\n# if self.end.lon < 0:\n# selfendlon = self.end.lon + 2*math.pi\n# if self.start.lon < 0:\n# selfstartlon = self.start.lon + 2*math.pi\n# if other.end.lon < 0:\n# otherendlon = other.end.lon + 2*math.pi\n# if other.start.lon < 0:\n# otherstartlon = other.start.lon + 2*math.pi\n\n log.info(' self lons: '+str(math.degrees(selfstartlon))+' '+str(math.degrees(selfendlon))+' other lons: '+str(math.degrees(otherstartlon))+' '+str(math.degrees(otherendlon)))\n\n # If both vertical, will be no intersection\n if abs(selfendlon - selfstartlon) < EPSILON and abs(otherendlon - otherstartlon) < EPSILON:\n log.info(' Both vertical, no intersection')\n return None\n # If self is vertical, but not parallel, intersection will be selfstartlon and lat = Mother*lon+B_other\n if abs(selfendlon - selfstartlon) < EPSILON:\n lon = selfstartlon\n M_other = (other.end.lat - other.start.lat)/(otherendlon-otherstartlon)\n B_other = other.end.lat - M_other*otherendlon\n lat = M_other*lon+B_other\n log.info(' self is vertical')\n #Make sure it falls within the segment and not outside.\n # Previously was only checking lat, need to \n # also check lon or opposite side of world would match\n if (lat > min([self.end.lat,self.start.lat]) and \n lat < max([self.end.lat,self.start.lat]) and\n lon > min([otherendlon,otherstartlon]) and\n lon < max([otherendlon,otherstartlon])):\n log.info(' and intersects')\n # Apparently Coordinate takes degrees ??? And must be -180 to 180 ?!\n # MLS use wrap_longitudes?\n if lon > math.pi:\n lon -= 2*math.pi\n return Coordinate(math.degrees(lon),math.degrees(lat))\n else:\n return None\n # same for other\n if abs(otherendlon - otherstartlon) < EPSILON:\n lon = otherstartlon\n M_self = (self.end.lat - self.start.lat)/(selfendlon-selfstartlon)\n B_self = self.end.lat - M_self*selfendlon\n lat = M_self*lon+B_self\n log.info(' other is vertical')\n #Make sure it falls within the segment and not outside.\n # Previously was only checking lat, need to \n # also check lon or opposite side of world would match\n if (lat > min([other.end.lat,other.start.lat]) and \n lat < max([other.end.lat,other.start.lat]) and \n lon > min([selfendlon,selfstartlon]) and\n lon < max([selfendlon,selfstartlon])):\n log.info(' and intersects')\n # Apparently Coordinate takes degrees ??? 
And must be -180 to 180 ?!\n # MLS Use wrap_longitudes?\n if lon > math.pi:\n lon -= 2*math.pi\n return Coordinate(math.degrees(lon),math.degrees(lat))\n else:\n return None\n\n \n\n # Get slopes of the lines \n M_self = (self.end.lat - self.start.lat)/(selfendlon-selfstartlon)\n M_other = (other.end.lat - other.start.lat)/(otherendlon-otherstartlon)\n \n # If they are parallel, no intersection\n if (M_self-M_other) < EPSILON:\n log.info(' self and other are parallel, no intersection')\n return None\n\n # Get the y-intercepts of the lines \n B_self = self.end.lat - M_self*selfendlon\n B_other = other.end.lat - M_other*otherendlon\n\n # Solve the equation\n # y=m1x+b1 and y=m2x+b2, equate y's so m1x+b1=m2x+b2, x = (b1-b2)/(m2-m1)\n # equate x's so x=(y-b1)/m1=(y-b2)/m2, y = (b1m2-b2m1)/(m2-m1)\n lon = (B_self - B_other)/(M_other - M_self)\n lat = (B_self*M_other - B_other*M_self)/(M_other-M_self)\n\n # Make sure lat/lon intersects within the line segment, and not outside.\n if (lat > min([other.end.lat,other.start.lat]) and \n lat < max([other.end.lat,other.start.lat]) and\n lon > min([otherendlon,otherstartlon]) and \n lon < max([otherendlon,otherstartlon]) and\n lat > min([self.end.lat,self.start.lat]) and \n lat < max([self.end.lat,self.start.lat]) and\n lon > min([selfendlon,selfstartlon]) and \n lon < max([selfendlon,selfstartlon])):\n log.info(' self and other intersect within segment')\n # Apparently Coordinate takes degrees ??? And must be -180 to 180 ?!\n # MLS use wrap longitudes?\n if lon > math.pi:\n lon -= 2*math.pi\n return Coordinate(math.degrees(lon),math.degrees(lat))\n else:\n log.info(' self and other intersect, but not within segment')\n return None", "def shortest_distance(self, begin, end):\n\n begin_index = self._cell_indexes[begin]\n end_index = self._cell_indexes[end]\n\n distance = self._distance_mat[begin_index, end_index]\n # distance *= pq.meter\n\n path = [begin]\n inv_index = {v: k for k, v in self._cell_indexes.items()}\n while True:\n next_index = self._preds[end_index, begin_index]\n if next_index == -9999:\n break\n\n begin_index = next_index\n\n seg = inv_index[next_index]\n path.append(seg)\n\n return distance, path", "def closest_distance(node_a, node_b):\n min_distance = 999999\n for loc_a in node_a.locations:\n for loc_b in node_b.locations:\n distance = abs(loc_a - loc_b)\n if distance < min_distance:\n min_distance = distance\n return min_distance", "def closest(self, x, y):\n if self.direction == 'horizontal':\n p_pts = np.array([\n self.ax.transData.transform((p, 0))[0] for p in self.positions\n ])\n dist = abs(p_pts - x)\n else:\n p_pts = np.array([\n self.ax.transData.transform((0, p))[1] for p in self.positions\n ])\n dist = abs(p_pts - y)\n index = np.argmin(dist)\n return index, dist[index]", "def closest_other(state):\n locations = others_locations(state)\n distances_ = distances(my_location(state), list(locations.values()))\n dist_dict = {key: dist for key, dist in zip(locations, distances_)}\n target = util.argmin_dict(dist_dict)\n return target", "def get_distance(first: Point, second: Point) -> Float:\n\n return sqrt(\n (second.x - first.x) ** 2\n +\n (second.y - first.y) ** 2\n )" ]
[ "0.7209214", "0.63660127", "0.62385994", "0.6171734", "0.59056085", "0.5888943", "0.5881519", "0.586255", "0.5790623", "0.5756521", "0.57293314", "0.57029945", "0.5671344", "0.5662922", "0.56311053", "0.56151825", "0.5565326", "0.55585504", "0.5547729", "0.55357975", "0.5535426", "0.55329597", "0.55327123", "0.5508527", "0.5483756", "0.54661614", "0.5459483", "0.5455496", "0.5435773", "0.5403871" ]
0.7036371
1
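The negatives above all orbit a single technique: trace each wire into the set of grid points it visits, intersect the two sets, and take the minimum Manhattan distance from the origin. A minimal self-contained sketch of that approach follows; the helper names trace_wire and closest_intersection are illustrative, not taken from any row in this dataset.

# Sketch of the shared technique in the snippets above, assuming wires are
# given as "R8,U5" move strings like those parsed in the next record.
from typing import List, Set, Tuple

STEPS = {"R": (1, 0), "L": (-1, 0), "U": (0, 1), "D": (0, -1)}

def trace_wire(moves: List[str]) -> Set[Tuple[int, int]]:
    """Return every grid point a wire passes through, excluding the origin."""
    x = y = 0
    seen = set()
    for move in moves:
        dx, dy = STEPS[move[0]]
        for _ in range(int(move[1:])):
            x, y = x + dx, y + dy
            seen.add((x, y))
    return seen

def closest_intersection(wire1: List[str], wire2: List[str]) -> int:
    """Smallest Manhattan distance from the origin to any crossing point."""
    crossings = trace_wire(wire1) & trace_wire(wire2)
    return min(abs(x) + abs(y) for x, y in crossings)

# The canonical sample pair crosses closest at Manhattan distance 6.
assert closest_intersection("R8,U5,L5,D3".split(","), "U7,R6,D4,L4".split(",")) == 6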
Parse the input wiring diagram into a list of Move named tuples. Wiring diagrams are assumed to be of the form "R8,U15,L5,D23"
def _parse_diagram(wiring_diagram: str) -> List[Move]: return [ Move(direction=shift[0], dist=int(shift[1:])) for shift in wiring_diagram.split(",") ]
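The document snippet above references Move and List without defining them. A plausible minimal setup that makes it runnable is sketched below; the Move named tuple definition is an assumption inferred from the field names the snippet itself uses, not part of the original record.

# Assumed supporting definitions for the snippet above.
from collections import namedtuple
from typing import List

Move = namedtuple("Move", ["direction", "dist"])

def _parse_diagram(wiring_diagram: str) -> List[Move]:
    return [
        Move(direction=shift[0], dist=int(shift[1:]))
        for shift in wiring_diagram.split(",")
    ]

print(_parse_diagram("R8,U15,L5,D23"))
# [Move(direction='R', dist=8), Move(direction='U', dist=15),
#  Move(direction='L', dist=5), Move(direction='D', dist=23)]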
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __parse_move_line(self, line):\n parts = re.sub('\\(.*?\\)', '', line).split()\n x, y = None, None\n for part in parts[:0:-1]:\n axis = part.upper()[0]\n value = float(part[1:])\n if axis in ['Z', 'F']:\n parts.remove(part)\n elif axis == 'X':\n x = value\n parts.remove(part)\n elif axis == 'Y':\n y = value\n parts.remove(part)\n if x is None or y is None:\n return None\n template = parts[0] + ' X{:.6f} Y{:.6f} ' + ' '.join(parts[1:])\n return [template, x, y]", "def parse_moves(moves):\n possible_moves = []\n for move in moves:\n if move == 'W':\n possible_moves.append('UP')\n elif move == 'D':\n possible_moves.append('RIGHT')\n elif move == 'S':\n possible_moves.append('DOWN')\n elif move == 'A':\n possible_moves.append('LEFT')\n return possible_moves", "def __parse_line(moves: str, start: int) -> List[str]:\n\n return [moves[start + j] for j in range(SIZE)]", "def _parse_input(moves: str) -> List[Line]:\n\n h_lines = []\n start = 0\n\n for i in range(SIZE):\n line_data = __parse_line(moves.replace(\"_\", EMPTY), start)\n h_lines.append(Line(line_data))\n start += SIZE\n\n return h_lines", "def _parse_move_statement(dlstr):\n\n try:\n tokens = dlstr.lower().split()\n if tokens[0] != \"move\":\n raise ValueError(\"Expected 'move' statement\")\n\n mtype, nmove, pfreq, rmin = \\\n tokens[1], int(tokens[2]), int(tokens[3]), float(tokens[4])\n except IndexError:\n raise ValueError(\"Badly formed 'move' statement?\")\n\n return mtype, nmove, pfreq, rmin", "def main():\n moveList = ('R4, R3, L3, L2, L1, R1, L1, R2, R3, L5, L5, R4, L4, R2, R4, '\n 'L3, R3, L3, R3, R4, R2, L1, R2, L3, L2, L1, R3, R5, L1, L4, '\n 'R2, L4, R3, R1, R2, L5, R2, L189, R5, L5, R52, R3, L1, R4, '\n 'R5, R1, R4, L1, L3, R2, L2, L3, R4, R3, L2, L5, R4, R5, L2, '\n 'R2, L1, L3, R3, L4, R4, R5, L1, L1, R3, L5, L2, R76, R2, R2, '\n 'L1, L3, R189, L3, L4, L1, L3, R5, R4, L1, R1, L1, L1, R2, '\n 'L4, R2, L5, L5, L5, R2, L4, L5, R4, R4, R5, L5, R3, L1, L3, '\n 'L1, L1, L3, L4, R5, L3, R5, R3, R3, L5, L5, R3, R4, L3, R3, '\n 'R1, R3, R2, R2, L1, R1, L3, L3, L3, L1, R2, L1, R4, R4, L1, '\n 'L1, R3, R3, R4, R1, L5, L2, R2, R3, R2, L3, R4, L5, R1, R4, '\n 'R5, R4, L4, R1, L3, R1, R3, L2, L3, R1, L2, R3, L3, L1, L3, '\n 'R4, L4, L5, R3, R5, R4, R1, L2, R3, R5, L5, L4, L1, L1')\n moveList = moveList.replace(' ', '').split(',')\n\n elf = Path()\n\n for move in moveList:\n start = [elf.x, elf.y]\n print('Elf turning {} and walking for {} steps.').format(\n move[0], move[1:])\n elf.move(move[0], move[1:])\n end = [elf.x, elf.y]\n if(addMoveToList(elf, start, end)):\n break\n print('Elf ended in position {},{}').format(elf.x, elf.y)\n print('Shortest distance from origin to EB HQ is: {}').format(\n abs(elf.x) + abs(elf.y))", "def parse_command_to_actions(moving_command):\n regex = re.compile(r'[A-Z][0-9]*')\n return re.findall(regex, moving_command)", "def parse_move_to_square(self, uci_move: str):\n chars = utils.split_string_to_chars(uci_move)\n square_from = ''.join(chars[0] + chars[1])\n square_to = ''.join(chars[2] + chars[3])\n return square_from, square_to", "def read_move(self, steps):\n res = []\n size = len(steps[0])\n side_size = int(math.sqrt(size))\n for i in range(0, len(steps) - 1):\n state = steps[i]\n next_state = steps[i + 1]\n next_pos = next_state.index(0)\n pos = state.index(0)\n rel = next_pos - pos\n direction = 'up'\n if rel == 1:\n direction = 'right'\n if rel == -1:\n direction = 'left'\n if rel == side_size:\n direction = 'down'\n res.append(direction)\n return res", "def _parse_move(origin, destination, 
axis):\n # If only one set of coordinates is defined, make sure it's used to move things\n if destination is None:\n destination = origin\n origin = [0, 0]\n\n d = _parse_coordinate(destination)\n o = _parse_coordinate(origin)\n if axis == \"x\":\n d = (d[0], o[1])\n if axis == \"y\":\n d = (o[0], d[1])\n dx, dy = np.array(d) - o\n\n return dx, dy", "def deserialize(self, str):\n try:\n if self.home is None:\n self.home = flyaq.msg.Coordinate()\n if self.movements is None:\n self.movements = None\n if self.move_transitions is None:\n self.move_transitions = None\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.name = str[start:end].decode('utf-8')\n else:\n self.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.type_name = str[start:end].decode('utf-8')\n else:\n self.type_name = str[start:end]\n _x = self\n start = end\n end += 16\n (_x.home.latitude, _x.home.longitude, _x.home.altitude, _x.home.heading,) = _struct_4f.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.movements = []\n for i in range(0, length):\n val1 = flyaq.msg.Move()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 1\n (val1.type,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.pre_actions = []\n for i in range(0, length):\n val2 = flyaq.msg.Action()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.name = str[start:end].decode('utf-8')\n else:\n val2.name = str[start:end]\n start = end\n end += 1\n (val2.type,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.action_name = str[start:end].decode('utf-8')\n else:\n val2.action_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.parameters = []\n for i in range(0, length):\n val3 = flyaq.msg.Parameter()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.key = str[start:end].decode('utf-8')\n else:\n val3.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.value = str[start:end].decode('utf-8')\n else:\n val3.value = str[start:end]\n val2.parameters.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.slot_name = str[start:end].decode('utf-8')\n else:\n val2.slot_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.receivers_name = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3 = str[start:end].decode('utf-8')\n else:\n val3 = str[start:end]\n val2.receivers_name.append(val3)\n val1.pre_actions.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.post_actions = []\n for i in range(0, length):\n val2 = flyaq.msg.Action()\n start = end\n end += 4\n 
(length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.name = str[start:end].decode('utf-8')\n else:\n val2.name = str[start:end]\n start = end\n end += 1\n (val2.type,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.action_name = str[start:end].decode('utf-8')\n else:\n val2.action_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.parameters = []\n for i in range(0, length):\n val3 = flyaq.msg.Parameter()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.key = str[start:end].decode('utf-8')\n else:\n val3.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.value = str[start:end].decode('utf-8')\n else:\n val3.value = str[start:end]\n val2.parameters.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.slot_name = str[start:end].decode('utf-8')\n else:\n val2.slot_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.receivers_name = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3 = str[start:end].decode('utf-8')\n else:\n val3 = str[start:end]\n val2.receivers_name.append(val3)\n val1.post_actions.append(val2)\n start = end\n end += 4\n (val1.altitude,) = _struct_f.unpack(str[start:end])\n _v3 = val1.target_position\n _x = _v3\n start = end\n end += 16\n (_x.latitude, _x.longitude, _x.altitude, _x.heading,) = _struct_4f.unpack(str[start:end])\n start = end\n end += 1\n (val1.strategy,) = _struct_b.unpack(str[start:end])\n _v4 = val1.duration\n _x = _v4\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2i.unpack(str[start:end])\n _x = val1\n start = end\n end += 13\n (_x.radius, _x.circle_altitude, _x.clockwise, _x.direction,) = _struct_2fBf.unpack(str[start:end])\n val1.clockwise = bool(val1.clockwise)\n self.movements.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.move_transitions = []\n for i in range(0, length):\n val1 = flyaq.msg.MoveTransition()\n start = end\n end += 1\n (val1.is_choice,) = _struct_B.unpack(str[start:end])\n val1.is_choice = bool(val1.is_choice)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.wait_for_slot_name = str[start:end].decode('utf-8')\n else:\n val1.wait_for_slot_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.from_move_name = str[start:end].decode('utf-8')\n else:\n val1.from_move_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.to_move_name = str[start:end].decode('utf-8')\n else:\n val1.to_move_name = str[start:end]\n start = end\n end += 1\n (val1.fluid,) = _struct_B.unpack(str[start:end])\n val1.fluid = bool(val1.fluid)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.condition_identifier = str[start:end].decode('utf-8')\n else:\n val1.condition_identifier = 
str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.false_branch_move_name = str[start:end].decode('utf-8')\n else:\n val1.false_branch_move_name = str[start:end]\n self.move_transitions.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.slot_names = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8')\n else:\n val1 = str[start:end]\n self.slot_names.append(val1)\n start = end\n end += 1\n (self.travel_mode,) = _struct_b.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def get_move_list(self) -> typing.List[typing.Tuple[str, str]]:\n statedct = dict([(CheckFSM.ST_REPORT_FOUND, 'found'),\n (CheckFSM.ST_REPORT_MISSING, 'missing'),\n (CheckFSM.ST_REPORT_MOVED, 'moved'),\n (CheckFSM.ST_IGNORE, 'ignore'),\n (CheckFSM.ST_ERROR_STATE, 'errorstate')])\n retlst = []\n for row in self._rowlst:\n # id string\n txt = row.getcellcontent(CheckScanList._ITID_COL)\n idstr = txt.get_text()\n # action state\n action_label = row.getcellcontent(CheckScanList._ACTION_COL)\n action_int = action_label.get_current_state()\n if action_int != CheckFSM.ST_IGNORE and action_int != CheckFSM.ST_ERROR_STATE:\n retlst.append((idstr, statedct[action_int]))\n return retlst", "def parse(input):\n\n SHIP_STRING = {\"submarine\": \"sub\", \"aircraft\": \"aircraft\", \"patrol\": \"patrol\",\n \"sub\" : \"sub\"}\n #ship type\n ship_parser = re.compile(r'(?i)submarine|sub|aircraft|patrol')\n ship_match = ship_parser.search(input)\n\n if ship_match is None:\n raise ValueError(\"Invalid ship type in input\")\n\n ship_type = SHIP_STRING.get(ship_match.group())\n\n #point index\n position_parser = re.compile(r'(?i)[A-J],?\\s?([0-9]?[0-9])')\n position_match = position_parser.search(input)\n\n if position_match is None:\n raise ValueError(\"Invalid position in input, make sure you use the form (x,y); e.g. 
(A,2)\")\n\n position = (ord(position_match.group()[0].upper())-64,\n int(position_match.group()[1:]))\n\n #orientation, True = horizontal\n orientation_parser = re.compile(r'(?i)horizontal|vertical|horizontally|' +\n 'vertically')\n orientation_match = orientation_parser.search(input)\n if orientation_match is None:\n raise ValueError(\"Invalid orientation in input\")\n orientation = \"horizontal\" in orientation_match.group().lower()\n\n return ship_type, position, orientation", "def get_movelist(self):\n return [move for move in self._get_frame_data()]", "def _AN_to_coords(self, move: str):\n\n orig_move = move\n\n extra_info = \"\"\n\n # remove all characters that don't matter when parsing\n for pointless_char in \"x+#\":\n move = move.replace(pointless_char, \"\")\n\n # Handle castling\n if CASTLE_QUEENSIDE in move:\n row = self._get_castling_row()\n return (row, 4), (row, 2), CASTLE_QUEENSIDE\n elif CASTLE_KINGSIDE in move:\n row = self._get_castling_row()\n return (row, 4), (row, 6), CASTLE_KINGSIDE\n\n # Pawn promotion\n if move[-2] == \"=\":\n extra_info = move[-1] if self.white_to_move else move[-1].lower()\n move = move[:-2]\n\n # Destination of move, this is the only guaranteed substring in the move\n dest_str = move[-2:]\n dest = State._EAN_coords_to_board_coords(dest_str)\n move = move[:-2]\n\n # Deduce what piece actually made the move, if there is no shown there is no pawn\n # Note in AN pieces are always uppercase and location is lowercase,\n # so this makes it simple to check if we have a piece or a location\n piece = \"P\"\n if move and move[0].isupper():\n piece = move[0]\n move = move[1:]\n if not self.white_to_move:\n piece = piece.lower()\n\n # At this point the only info the move should contain is a hint on where the piece is coming from\n loc_hint = move\n\n possible_moves = self.get_all_moves()\n possible_moves = filter(lambda x: dest_str in x, possible_moves) # Filter to only moves that land on the right destination\n possible_moves = list(filter(lambda x: loc_hint in x[0:2], possible_moves)) # Filter to only moves that match the hint in the algebraic notation\n for possible_move in possible_moves:\n row, col = State._EAN_coords_to_board_coords(possible_move[0:2])\n if self.board[row][col] == piece:\n return (row, col), dest, extra_info\n\n raise ValueError(\"Algebraic notation parsing failed, no valid move found matching the given move \" + orig_move\n + \" with board state\\n\" + str(self))", "def get_move_positions(move):\n move_positions = []\n for (xi, yi) in move.orientation:\n (x, y) = (xi + move.x, yi + move.y)\n move_positions.append((y, x))\n return move_positions", "def parse(name: unicode) -> List[unicode]:\n ...", "def parse_input(self, some_input):\n\n temp = []\n temp2 = []\n\n # breaks apart input\n for el in some_input:\n temp.append(el)\n\n if len(some_input) == 3:\n temp[1] = str(temp[1] + str(temp[2]))\n temp.pop()\n\n temp[1] = int(temp[1])\n\n # Checks boundries and input type before conversion\n dataValidation = [self.validate_move_input_and_bounderies(temp)]\n if False in dataValidation:\n print(\"Try Again\")\n return False\n\n temp2.append(self._row_conversion[temp[1]])\n temp2.append(self._col_conversion[temp[0]])\n\n return temp2", "def parse_cmds(cmds):\n input = iter(cmds.split())\n parsed = []\n while True:\n cmd = next(input, None)\n if cmd is None:\n break\n elif cmd == 'PLACE':\n parsed.append(('PLACE', next(input, None)))\n else:\n parsed.append((cmd, None))\n return parsed", "def parse_hand(self):\n self.parse_part()\n 
self.parse_header()\n self.parse_setup()\n self.parse_preflop()\n self.parse_flop()\n self.parse_turn()\n self.parse_river()\n self.parse_showdown()\n self.conclude_hand()", "def deserialize(self, str):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.goal_id is None:\n self.goal_id = actionlib_msgs.msg.GoalID()\n if self.goal is None:\n self.goal = moveit_msgs.msg.MoveGroupGoal()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 8\n (_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.goal_id.id = str[start:end].decode('utf-8')\n else:\n self.goal_id.id = str[start:end]\n _x = self\n start = end\n end += 12\n (_x.goal.request.workspace_parameters.header.seq, _x.goal.request.workspace_parameters.header.stamp.secs, _x.goal.request.workspace_parameters.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.goal.request.workspace_parameters.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.goal.request.workspace_parameters.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 60\n (_x.goal.request.workspace_parameters.min_corner.x, _x.goal.request.workspace_parameters.min_corner.y, _x.goal.request.workspace_parameters.min_corner.z, _x.goal.request.workspace_parameters.max_corner.x, _x.goal.request.workspace_parameters.max_corner.y, _x.goal.request.workspace_parameters.max_corner.z, _x.goal.request.start_state.joint_state.header.seq, _x.goal.request.start_state.joint_state.header.stamp.secs, _x.goal.request.start_state.joint_state.header.stamp.nsecs,) = _get_struct_6d3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.goal.request.start_state.joint_state.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.goal.request.start_state.joint_state.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.goal.request.start_state.joint_state.name = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8')\n else:\n val1 = str[start:end]\n self.goal.request.start_state.joint_state.name.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.goal.request.start_state.joint_state.position = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.goal.request.start_state.joint_state.velocity = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n 
self.goal.request.start_state.joint_state.effort = struct.unpack(pattern, str[start:end])\n _x = self\n start = end\n end += 12\n (_x.goal.request.start_state.multi_dof_joint_state.header.seq, _x.goal.request.start_state.multi_dof_joint_state.header.stamp.secs, _x.goal.request.start_state.multi_dof_joint_state.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.goal.request.start_state.multi_dof_joint_state.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.goal.request.start_state.multi_dof_joint_state.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.goal.request.start_state.multi_dof_joint_state.joint_names = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8')\n else:\n val1 = str[start:end]\n self.goal.request.start_state.multi_dof_joint_state.joint_names.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.goal.request.start_state.multi_dof_joint_state.transforms = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Transform()\n _v149 = val1.translation\n _x = _v149\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v150 = val1.rotation\n _x = _v150\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n self.goal.request.start_state.multi_dof_joint_state.transforms.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.goal.request.start_state.multi_dof_joint_state.twist = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Twist()\n _v151 = val1.linear\n _x = _v151\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v152 = val1.angular\n _x = _v152\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n self.goal.request.start_state.multi_dof_joint_state.twist.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.goal.request.start_state.multi_dof_joint_state.wrench = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Wrench()\n _v153 = val1.force\n _x = _v153\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v154 = val1.torque\n _x = _v154\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n self.goal.request.start_state.multi_dof_joint_state.wrench.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.goal.request.start_state.attached_collision_objects = []\n for i in range(0, length):\n val1 = moveit_msgs.msg.AttachedCollisionObject()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.link_name = str[start:end].decode('utf-8')\n else:\n val1.link_name = str[start:end]\n _v155 = val1.object\n _v156 = _v155.header\n start = end\n end += 4\n (_v156.seq,) = _get_struct_I().unpack(str[start:end])\n _v157 = _v156.stamp\n _x = _v157\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v156.frame_id = 
str[start:end].decode('utf-8')\n else:\n _v156.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v155.id = str[start:end].decode('utf-8')\n else:\n _v155.id = str[start:end]\n _v158 = _v155.type\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v158.key = str[start:end].decode('utf-8')\n else:\n _v158.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v158.db = str[start:end].decode('utf-8')\n else:\n _v158.db = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v155.primitives = []\n for i in range(0, length):\n val3 = shape_msgs.msg.SolidPrimitive()\n start = end\n end += 1\n (val3.type,) = _get_struct_B().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n val3.dimensions = struct.unpack(pattern, str[start:end])\n _v155.primitives.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v155.primitive_poses = []\n for i in range(0, length):\n val3 = geometry_msgs.msg.Pose()\n _v159 = val3.position\n _x = _v159\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v160 = val3.orientation\n _x = _v160\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n _v155.primitive_poses.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v155.meshes = []\n for i in range(0, length):\n val3 = shape_msgs.msg.Mesh()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val3.triangles = []\n for i in range(0, length):\n val4 = shape_msgs.msg.MeshTriangle()\n start = end\n end += 12\n val4.vertex_indices = _get_struct_3I().unpack(str[start:end])\n val3.triangles.append(val4)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val3.vertices = []\n for i in range(0, length):\n val4 = geometry_msgs.msg.Point()\n _x = val4\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n val3.vertices.append(val4)\n _v155.meshes.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v155.mesh_poses = []\n for i in range(0, length):\n val3 = geometry_msgs.msg.Pose()\n _v161 = val3.position\n _x = _v161\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v162 = val3.orientation\n _x = _v162\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n _v155.mesh_poses.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v155.planes = []\n for i in range(0, length):\n val3 = shape_msgs.msg.Plane()\n start = end\n end += 32\n val3.coef = _get_struct_4d().unpack(str[start:end])\n _v155.planes.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v155.plane_poses = []\n for i in range(0, length):\n val3 = geometry_msgs.msg.Pose()\n _v163 = val3.position\n _x = _v163\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v164 = val3.orientation\n _x = _v164\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n _v155.plane_poses.append(val3)\n 
start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v155.subframe_names = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3 = str[start:end].decode('utf-8')\n else:\n val3 = str[start:end]\n _v155.subframe_names.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v155.subframe_poses = []\n for i in range(0, length):\n val3 = geometry_msgs.msg.Pose()\n _v165 = val3.position\n _x = _v165\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v166 = val3.orientation\n _x = _v166\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n _v155.subframe_poses.append(val3)\n start = end\n end += 1\n (_v155.operation,) = _get_struct_b().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.touch_links = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2 = str[start:end].decode('utf-8')\n else:\n val2 = str[start:end]\n val1.touch_links.append(val2)\n _v167 = val1.detach_posture\n _v168 = _v167.header\n start = end\n end += 4\n (_v168.seq,) = _get_struct_I().unpack(str[start:end])\n _v169 = _v168.stamp\n _x = _v169\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v168.frame_id = str[start:end].decode('utf-8')\n else:\n _v168.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v167.joint_names = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3 = str[start:end].decode('utf-8')\n else:\n val3 = str[start:end]\n _v167.joint_names.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v167.points = []\n for i in range(0, length):\n val3 = trajectory_msgs.msg.JointTrajectoryPoint()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n val3.positions = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n val3.velocities = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n val3.accelerations = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n val3.effort = struct.unpack(pattern, str[start:end])\n _v170 = val3.time_from_start\n _x = _v170\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end])\n _v167.points.append(val3)\n start = end\n end += 8\n (val1.weight,) = _get_struct_d().unpack(str[start:end])\n self.goal.request.start_state.attached_collision_objects.append(val1)\n start = end\n end += 1\n (self.goal.request.start_state.is_diff,) = _get_struct_B().unpack(str[start:end])\n self.goal.request.start_state.is_diff = 
bool(self.goal.request.start_state.is_diff)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.goal.request.goal_constraints = []\n for i in range(0, length):\n val1 = moveit_msgs.msg.Constraints()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.joint_constraints = []\n for i in range(0, length):\n val2 = moveit_msgs.msg.JointConstraint()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.joint_name = str[start:end].decode('utf-8')\n else:\n val2.joint_name = str[start:end]\n _x = val2\n start = end\n end += 32\n (_x.position, _x.tolerance_above, _x.tolerance_below, _x.weight,) = _get_struct_4d().unpack(str[start:end])\n val1.joint_constraints.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.position_constraints = []\n for i in range(0, length):\n val2 = moveit_msgs.msg.PositionConstraint()\n _v171 = val2.header\n start = end\n end += 4\n (_v171.seq,) = _get_struct_I().unpack(str[start:end])\n _v172 = _v171.stamp\n _x = _v172\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v171.frame_id = str[start:end].decode('utf-8')\n else:\n _v171.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.link_name = str[start:end].decode('utf-8')\n else:\n val2.link_name = str[start:end]\n _v173 = val2.target_point_offset\n _x = _v173\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v174 = val2.constraint_region\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v174.primitives = []\n for i in range(0, length):\n val4 = shape_msgs.msg.SolidPrimitive()\n start = end\n end += 1\n (val4.type,) = _get_struct_B().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n val4.dimensions = struct.unpack(pattern, str[start:end])\n _v174.primitives.append(val4)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v174.primitive_poses = []\n for i in range(0, length):\n val4 = geometry_msgs.msg.Pose()\n _v175 = val4.position\n _x = _v175\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v176 = val4.orientation\n _x = _v176\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n _v174.primitive_poses.append(val4)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v174.meshes = []\n for i in range(0, length):\n val4 = shape_msgs.msg.Mesh()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val4.triangles = []\n for i in range(0, length):\n val5 = shape_msgs.msg.MeshTriangle()\n start = end\n end += 12\n val5.vertex_indices = _get_struct_3I().unpack(str[start:end])\n val4.triangles.append(val5)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val4.vertices = []\n for i in range(0, length):\n val5 = geometry_msgs.msg.Point()\n _x = val5\n start = 
end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n val4.vertices.append(val5)\n _v174.meshes.append(val4)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v174.mesh_poses = []\n for i in range(0, length):\n val4 = geometry_msgs.msg.Pose()\n _v177 = val4.position\n _x = _v177\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v178 = val4.orientation\n _x = _v178\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n _v174.mesh_poses.append(val4)\n start = end\n end += 8\n (val2.weight,) = _get_struct_d().unpack(str[start:end])\n val1.position_constraints.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.orientation_constraints = []\n for i in range(0, length):\n val2 = moveit_msgs.msg.OrientationConstraint()\n _v179 = val2.header\n start = end\n end += 4\n (_v179.seq,) = _get_struct_I().unpack(str[start:end])\n _v180 = _v179.stamp\n _x = _v180\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v179.frame_id = str[start:end].decode('utf-8')\n else:\n _v179.frame_id = str[start:end]\n _v181 = val2.orientation\n _x = _v181\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.link_name = str[start:end].decode('utf-8')\n else:\n val2.link_name = str[start:end]\n _x = val2\n start = end\n end += 32\n (_x.absolute_x_axis_tolerance, _x.absolute_y_axis_tolerance, _x.absolute_z_axis_tolerance, _x.weight,) = _get_struct_4d().unpack(str[start:end])\n val1.orientation_constraints.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.visibility_constraints = []\n for i in range(0, length):\n val2 = moveit_msgs.msg.VisibilityConstraint()\n start = end\n end += 8\n (val2.target_radius,) = _get_struct_d().unpack(str[start:end])\n _v182 = val2.target_pose\n _v183 = _v182.header\n start = end\n end += 4\n (_v183.seq,) = _get_struct_I().unpack(str[start:end])\n _v184 = _v183.stamp\n _x = _v184\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v183.frame_id = str[start:end].decode('utf-8')\n else:\n _v183.frame_id = str[start:end]\n _v185 = _v182.pose\n _v186 = _v185.position\n _x = _v186\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v187 = _v185.orientation\n _x = _v187\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 4\n (val2.cone_sides,) = _get_struct_i().unpack(str[start:end])\n _v188 = val2.sensor_pose\n _v189 = _v188.header\n start = end\n end += 4\n (_v189.seq,) = _get_struct_I().unpack(str[start:end])\n _v190 = _v189.stamp\n _x = _v190\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v189.frame_id = str[start:end].decode('utf-8')\n else:\n _v189.frame_id = str[start:end]\n _v191 = _v188.pose\n _v192 = _v191.position\n _x = _v192\n start = 
end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v193 = _v191.orientation\n _x = _v193\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n _x = val2\n start = end\n end += 25\n (_x.max_view_angle, _x.max_range_angle, _x.sensor_view_direction, _x.weight,) = _get_struct_2dBd().unpack(str[start:end])\n val1.visibility_constraints.append(val2)\n self.goal.request.goal_constraints.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.goal.request.path_constraints.name = str[start:end].decode('utf-8')\n else:\n self.goal.request.path_constraints.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.goal.request.path_constraints.joint_constraints = []\n for i in range(0, length):\n val1 = moveit_msgs.msg.JointConstraint()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.joint_name = str[start:end].decode('utf-8')\n else:\n val1.joint_name = str[start:end]\n _x = val1\n start = end\n end += 32\n (_x.position, _x.tolerance_above, _x.tolerance_below, _x.weight,) = _get_struct_4d().unpack(str[start:end])\n self.goal.request.path_constraints.joint_constraints.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.goal.request.path_constraints.position_constraints = []\n for i in range(0, length):\n val1 = moveit_msgs.msg.PositionConstraint()\n _v194 = val1.header\n start = end\n end += 4\n (_v194.seq,) = _get_struct_I().unpack(str[start:end])\n _v195 = _v194.stamp\n _x = _v195\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v194.frame_id = str[start:end].decode('utf-8')\n else:\n _v194.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.link_name = str[start:end].decode('utf-8')\n else:\n val1.link_name = str[start:end]\n _v196 = val1.target_point_offset\n _x = _v196\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v197 = val1.constraint_region\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v197.primitives = []\n for i in range(0, length):\n val3 = shape_msgs.msg.SolidPrimitive()\n start = end\n end += 1\n (val3.type,) = _get_struct_B().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n val3.dimensions = struct.unpack(pattern, str[start:end])\n _v197.primitives.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v197.primitive_poses = []\n for i in range(0, length):\n val3 = geometry_msgs.msg.Pose()\n _v198 = val3.position\n _x = _v198\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v199 = val3.orientation\n _x = _v199\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n _v197.primitive_poses.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v197.meshes = []\n for i in range(0, length):\n val3 = shape_msgs.msg.Mesh()\n start = end\n end += 4\n (length,) = 
_struct_I.unpack(str[start:end])\n val3.triangles = []\n for i in range(0, length):\n val4 = shape_msgs.msg.MeshTriangle()\n start = end\n end += 12\n val4.vertex_indices = _get_struct_3I().unpack(str[start:end])\n val3.triangles.append(val4)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val3.vertices = []\n for i in range(0, length):\n val4 = geometry_msgs.msg.Point()\n _x = val4\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n val3.vertices.append(val4)\n _v197.meshes.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v197.mesh_poses = []\n for i in range(0, length):\n val3 = geometry_msgs.msg.Pose()\n _v200 = val3.position\n _x = _v200\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v201 = val3.orientation\n _x = _v201\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n _v197.mesh_poses.append(val3)\n start = end\n end += 8\n (val1.weight,) = _get_struct_d().unpack(str[start:end])\n self.goal.request.path_constraints.position_constraints.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.goal.request.path_constraints.orientation_constraints = []\n for i in range(0, length):\n val1 = moveit_msgs.msg.OrientationConstraint()\n _v202 = val1.header\n start = end\n end += 4\n (_v202.seq,) = _get_struct_I().unpack(str[start:end])\n _v203 = _v202.stamp\n _x = _v203\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v202.frame_id = str[start:end].decode('utf-8')\n else:\n _v202.frame_id = str[start:end]\n _v204 = val1.orientation\n _x = _v204\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.link_name = str[start:end].decode('utf-8')\n else:\n val1.link_name = str[start:end]\n _x = val1\n start = end\n end += 32\n (_x.absolute_x_axis_tolerance, _x.absolute_y_axis_tolerance, _x.absolute_z_axis_tolerance, _x.weight,) = _get_struct_4d().unpack(str[start:end])\n self.goal.request.path_constraints.orientation_constraints.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.goal.request.path_constraints.visibility_constraints = []\n for i in range(0, length):\n val1 = moveit_msgs.msg.VisibilityConstraint()\n start = end\n end += 8\n (val1.target_radius,) = _get_struct_d().unpack(str[start:end])\n _v205 = val1.target_pose\n _v206 = _v205.header\n start = end\n end += 4\n (_v206.seq,) = _get_struct_I().unpack(str[start:end])\n _v207 = _v206.stamp\n _x = _v207\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v206.frame_id = str[start:end].decode('utf-8')\n else:\n _v206.frame_id = str[start:end]\n _v208 = _v205.pose\n _v209 = _v208.position\n _x = _v209\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v210 = _v208.orientation\n _x = _v210\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 4\n (val1.cone_sides,) = 
_get_struct_i().unpack(str[start:end])\n _v211 = val1.sensor_pose\n _v212 = _v211.header\n start = end\n end += 4\n (_v212.seq,) = _get_struct_I().unpack(str[start:end])\n _v213 = _v212.stamp\n _x = _v213\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v212.frame_id = str[start:end].decode('utf-8')\n else:\n _v212.frame_id = str[start:end]\n _v214 = _v211.pose\n _v215 = _v214.position\n _x = _v215\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v216 = _v214.orientation\n _x = _v216\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n _x = val1\n start = end\n end += 25\n (_x.max_view_angle, _x.max_range_angle, _x.sensor_view_direction, _x.weight,) = _get_struct_2dBd().unpack(str[start:end])\n self.goal.request.path_constraints.visibility_constraints.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.goal.request.trajectory_constraints.constraints = []\n for i in range(0, length):\n val1 = moveit_msgs.msg.Constraints()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.joint_constraints = []\n for i in range(0, length):\n val2 = moveit_msgs.msg.JointConstraint()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.joint_name = str[start:end].decode('utf-8')\n else:\n val2.joint_name = str[start:end]\n _x = val2\n start = end\n end += 32\n (_x.position, _x.tolerance_above, _x.tolerance_below, _x.weight,) = _get_struct_4d().unpack(str[start:end])\n val1.joint_constraints.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.position_constraints = []\n for i in range(0, length):\n val2 = moveit_msgs.msg.PositionConstraint()\n _v217 = val2.header\n start = end\n end += 4\n (_v217.seq,) = _get_struct_I().unpack(str[start:end])\n _v218 = _v217.stamp\n _x = _v218\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v217.frame_id = str[start:end].decode('utf-8')\n else:\n _v217.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.link_name = str[start:end].decode('utf-8')\n else:\n val2.link_name = str[start:end]\n _v219 = val2.target_point_offset\n _x = _v219\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v220 = val2.constraint_region\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v220.primitives = []\n for i in range(0, length):\n val4 = shape_msgs.msg.SolidPrimitive()\n start = end\n end += 1\n (val4.type,) = _get_struct_B().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n val4.dimensions = struct.unpack(pattern, str[start:end])\n _v220.primitives.append(val4)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n 
_v220.primitive_poses = []\n for i in range(0, length):\n val4 = geometry_msgs.msg.Pose()\n _v221 = val4.position\n _x = _v221\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v222 = val4.orientation\n _x = _v222\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n _v220.primitive_poses.append(val4)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v220.meshes = []\n for i in range(0, length):\n val4 = shape_msgs.msg.Mesh()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val4.triangles = []\n for i in range(0, length):\n val5 = shape_msgs.msg.MeshTriangle()\n start = end\n end += 12\n val5.vertex_indices = _get_struct_3I().unpack(str[start:end])\n val4.triangles.append(val5)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val4.vertices = []\n for i in range(0, length):\n val5 = geometry_msgs.msg.Point()\n _x = val5\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n val4.vertices.append(val5)\n _v220.meshes.append(val4)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v220.mesh_poses = []\n for i in range(0, length):\n val4 = geometry_msgs.msg.Pose()\n _v223 = val4.position\n _x = _v223\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v224 = val4.orientation\n _x = _v224\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n _v220.mesh_poses.append(val4)\n start = end\n end += 8\n (val2.weight,) = _get_struct_d().unpack(str[start:end])\n val1.position_constraints.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.orientation_constraints = []\n for i in range(0, length):\n val2 = moveit_msgs.msg.OrientationConstraint()\n _v225 = val2.header\n start = end\n end += 4\n (_v225.seq,) = _get_struct_I().unpack(str[start:end])\n _v226 = _v225.stamp\n _x = _v226\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v225.frame_id = str[start:end].decode('utf-8')\n else:\n _v225.frame_id = str[start:end]\n _v227 = val2.orientation\n _x = _v227\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.link_name = str[start:end].decode('utf-8')\n else:\n val2.link_name = str[start:end]\n _x = val2\n start = end\n end += 32\n (_x.absolute_x_axis_tolerance, _x.absolute_y_axis_tolerance, _x.absolute_z_axis_tolerance, _x.weight,) = _get_struct_4d().unpack(str[start:end])\n val1.orientation_constraints.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.visibility_constraints = []\n for i in range(0, length):\n val2 = moveit_msgs.msg.VisibilityConstraint()\n start = end\n end += 8\n (val2.target_radius,) = _get_struct_d().unpack(str[start:end])\n _v228 = val2.target_pose\n _v229 = _v228.header\n start = end\n end += 4\n (_v229.seq,) = _get_struct_I().unpack(str[start:end])\n _v230 = _v229.stamp\n _x = _v230\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if 
python3:\n _v229.frame_id = str[start:end].decode('utf-8')\n else:\n _v229.frame_id = str[start:end]\n _v231 = _v228.pose\n _v232 = _v231.position\n _x = _v232\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v233 = _v231.orientation\n _x = _v233\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 4\n (val2.cone_sides,) = _get_struct_i().unpack(str[start:end])\n _v234 = val2.sensor_pose\n _v235 = _v234.header\n start = end\n end += 4\n (_v235.seq,) = _get_struct_I().unpack(str[start:end])\n _v236 = _v235.stamp\n _x = _v236\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v235.frame_id = str[start:end].decode('utf-8')\n else:\n _v235.frame_id = str[start:end]\n _v237 = _v234.pose\n _v238 = _v237.position\n _x = _v238\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v239 = _v237.orientation\n _x = _v239\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n _x = val2\n start = end\n end += 25\n (_x.max_view_angle, _x.max_range_angle, _x.sensor_view_direction, _x.weight,) = _get_struct_2dBd().unpack(str[start:end])\n val1.visibility_constraints.append(val2)\n self.goal.request.trajectory_constraints.constraints.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.goal.request.reference_trajectories = []\n for i in range(0, length):\n val1 = moveit_msgs.msg.GenericTrajectory()\n _v240 = val1.header\n start = end\n end += 4\n (_v240.seq,) = _get_struct_I().unpack(str[start:end])\n _v241 = _v240.stamp\n _x = _v241\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v240.frame_id = str[start:end].decode('utf-8')\n else:\n _v240.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.joint_trajectory = []\n for i in range(0, length):\n val2 = trajectory_msgs.msg.JointTrajectory()\n _v242 = val2.header\n start = end\n end += 4\n (_v242.seq,) = _get_struct_I().unpack(str[start:end])\n _v243 = _v242.stamp\n _x = _v243\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v242.frame_id = str[start:end].decode('utf-8')\n else:\n _v242.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.joint_names = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3 = str[start:end].decode('utf-8')\n else:\n val3 = str[start:end]\n val2.joint_names.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.points = []\n for i in range(0, length):\n val3 = trajectory_msgs.msg.JointTrajectoryPoint()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n val3.positions = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = 
'<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n val3.velocities = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n val3.accelerations = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n val3.effort = struct.unpack(pattern, str[start:end])\n _v244 = val3.time_from_start\n _x = _v244\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end])\n val2.points.append(val3)\n val1.joint_trajectory.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.cartesian_trajectory = []\n for i in range(0, length):\n val2 = moveit_msgs.msg.CartesianTrajectory()\n _v245 = val2.header\n start = end\n end += 4\n (_v245.seq,) = _get_struct_I().unpack(str[start:end])\n _v246 = _v245.stamp\n _x = _v246\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v245.frame_id = str[start:end].decode('utf-8')\n else:\n _v245.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.tracked_frame = str[start:end].decode('utf-8')\n else:\n val2.tracked_frame = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.points = []\n for i in range(0, length):\n val3 = moveit_msgs.msg.CartesianTrajectoryPoint()\n _v247 = val3.point\n _v248 = _v247.pose\n _v249 = _v248.position\n _x = _v249\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v250 = _v248.orientation\n _x = _v250\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n _v251 = _v247.velocity\n _v252 = _v251.linear\n _x = _v252\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v253 = _v251.angular\n _x = _v253\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v254 = _v247.acceleration\n _v255 = _v254.linear\n _x = _v255\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v256 = _v254.angular\n _x = _v256\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v257 = val3.time_from_start\n _x = _v257\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end])\n val2.points.append(val3)\n val1.cartesian_trajectory.append(val2)\n self.goal.request.reference_trajectories.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.goal.request.planner_id = str[start:end].decode('utf-8')\n else:\n self.goal.request.planner_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.goal.request.group_name = str[start:end].decode('utf-8')\n else:\n self.goal.request.group_name = str[start:end]\n _x = self\n start = end\n end += 28\n (_x.goal.request.num_planning_attempts, _x.goal.request.allowed_planning_time, _x.goal.request.max_velocity_scaling_factor, _x.goal.request.max_acceleration_scaling_factor,) = 
_get_struct_i3d().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.goal.planning_options.planning_scene_diff.name = str[start:end].decode('utf-8')\n else:\n self.goal.planning_options.planning_scene_diff.name = str[start:end]\n _x = self\n start = end\n end += 12\n (_x.goal.planning_options.planning_scene_diff.robot_state.joint_state.header.seq, _x.goal.planning_options.planning_scene_diff.robot_state.joint_state.header.stamp.secs, _x.goal.planning_options.planning_scene_diff.robot_state.joint_state.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.goal.planning_options.planning_scene_diff.robot_state.joint_state.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.goal.planning_options.planning_scene_diff.robot_state.joint_state.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.goal.planning_options.planning_scene_diff.robot_state.joint_state.name = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8')\n else:\n val1 = str[start:end]\n self.goal.planning_options.planning_scene_diff.robot_state.joint_state.name.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.goal.planning_options.planning_scene_diff.robot_state.joint_state.position = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.goal.planning_options.planning_scene_diff.robot_state.joint_state.velocity = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.goal.planning_options.planning_scene_diff.robot_state.joint_state.effort = struct.unpack(pattern, str[start:end])\n _x = self\n start = end\n end += 12\n (_x.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.header.seq, _x.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.header.stamp.secs, _x.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.joint_names = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8')\n else:\n val1 = str[start:end]\n self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.joint_names.append(val1)\n start = end\n end += 4\n 
(length,) = _struct_I.unpack(str[start:end])\n self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.transforms = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Transform()\n _v258 = val1.translation\n _x = _v258\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v259 = val1.rotation\n _x = _v259\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.transforms.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.twist = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Twist()\n _v260 = val1.linear\n _x = _v260\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v261 = val1.angular\n _x = _v261\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.twist.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.wrench = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Wrench()\n _v262 = val1.force\n _x = _v262\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v263 = val1.torque\n _x = _v263\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.wrench.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.goal.planning_options.planning_scene_diff.robot_state.attached_collision_objects = []\n for i in range(0, length):\n val1 = moveit_msgs.msg.AttachedCollisionObject()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.link_name = str[start:end].decode('utf-8')\n else:\n val1.link_name = str[start:end]\n _v264 = val1.object\n _v265 = _v264.header\n start = end\n end += 4\n (_v265.seq,) = _get_struct_I().unpack(str[start:end])\n _v266 = _v265.stamp\n _x = _v266\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v265.frame_id = str[start:end].decode('utf-8')\n else:\n _v265.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v264.id = str[start:end].decode('utf-8')\n else:\n _v264.id = str[start:end]\n _v267 = _v264.type\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v267.key = str[start:end].decode('utf-8')\n else:\n _v267.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v267.db = str[start:end].decode('utf-8')\n else:\n _v267.db = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v264.primitives = []\n for i in range(0, length):\n val3 = shape_msgs.msg.SolidPrimitive()\n start = end\n end += 1\n (val3.type,) = _get_struct_B().unpack(str[start:end])\n start = end\n end += 4\n 
(length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n val3.dimensions = struct.unpack(pattern, str[start:end])\n _v264.primitives.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v264.primitive_poses = []\n for i in range(0, length):\n val3 = geometry_msgs.msg.Pose()\n _v268 = val3.position\n _x = _v268\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v269 = val3.orientation\n _x = _v269\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n _v264.primitive_poses.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v264.meshes = []\n for i in range(0, length):\n val3 = shape_msgs.msg.Mesh()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val3.triangles = []\n for i in range(0, length):\n val4 = shape_msgs.msg.MeshTriangle()\n start = end\n end += 12\n val4.vertex_indices = _get_struct_3I().unpack(str[start:end])\n val3.triangles.append(val4)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val3.vertices = []\n for i in range(0, length):\n val4 = geometry_msgs.msg.Point()\n _x = val4\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n val3.vertices.append(val4)\n _v264.meshes.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v264.mesh_poses = []\n for i in range(0, length):\n val3 = geometry_msgs.msg.Pose()\n _v270 = val3.position\n _x = _v270\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v271 = val3.orientation\n _x = _v271\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n _v264.mesh_poses.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v264.planes = []\n for i in range(0, length):\n val3 = shape_msgs.msg.Plane()\n start = end\n end += 32\n val3.coef = _get_struct_4d().unpack(str[start:end])\n _v264.planes.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v264.plane_poses = []\n for i in range(0, length):\n val3 = geometry_msgs.msg.Pose()\n _v272 = val3.position\n _x = _v272\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v273 = val3.orientation\n _x = _v273\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n _v264.plane_poses.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v264.subframe_names = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3 = str[start:end].decode('utf-8')\n else:\n val3 = str[start:end]\n _v264.subframe_names.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v264.subframe_poses = []\n for i in range(0, length):\n val3 = geometry_msgs.msg.Pose()\n _v274 = val3.position\n _x = _v274\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v275 = val3.orientation\n _x = _v275\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n _v264.subframe_poses.append(val3)\n start = end\n end += 1\n (_v264.operation,) = _get_struct_b().unpack(str[start:end])\n start = end\n end += 4\n (length,) = 
_struct_I.unpack(str[start:end])\n val1.touch_links = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2 = str[start:end].decode('utf-8')\n else:\n val2 = str[start:end]\n val1.touch_links.append(val2)\n _v276 = val1.detach_posture\n _v277 = _v276.header\n start = end\n end += 4\n (_v277.seq,) = _get_struct_I().unpack(str[start:end])\n _v278 = _v277.stamp\n _x = _v278\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v277.frame_id = str[start:end].decode('utf-8')\n else:\n _v277.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v276.joint_names = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3 = str[start:end].decode('utf-8')\n else:\n val3 = str[start:end]\n _v276.joint_names.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v276.points = []\n for i in range(0, length):\n val3 = trajectory_msgs.msg.JointTrajectoryPoint()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n val3.positions = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n val3.velocities = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n val3.accelerations = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n val3.effort = struct.unpack(pattern, str[start:end])\n _v279 = val3.time_from_start\n _x = _v279\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end])\n _v276.points.append(val3)\n start = end\n end += 8\n (val1.weight,) = _get_struct_d().unpack(str[start:end])\n self.goal.planning_options.planning_scene_diff.robot_state.attached_collision_objects.append(val1)\n start = end\n end += 1\n (self.goal.planning_options.planning_scene_diff.robot_state.is_diff,) = _get_struct_B().unpack(str[start:end])\n self.goal.planning_options.planning_scene_diff.robot_state.is_diff = bool(self.goal.planning_options.planning_scene_diff.robot_state.is_diff)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.goal.planning_options.planning_scene_diff.robot_model_name = str[start:end].decode('utf-8')\n else:\n self.goal.planning_options.planning_scene_diff.robot_model_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.goal.planning_options.planning_scene_diff.fixed_frame_transforms = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.TransformStamped()\n _v280 = val1.header\n start = end\n end += 4\n (_v280.seq,) = _get_struct_I().unpack(str[start:end])\n _v281 = _v280.stamp\n _x = _v281\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = 
_struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v280.frame_id = str[start:end].decode('utf-8')\n else:\n _v280.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.child_frame_id = str[start:end].decode('utf-8')\n else:\n val1.child_frame_id = str[start:end]\n _v282 = val1.transform\n _v283 = _v282.translation\n _x = _v283\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v284 = _v282.rotation\n _x = _v284\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n self.goal.planning_options.planning_scene_diff.fixed_frame_transforms.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.entry_names = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8')\n else:\n val1 = str[start:end]\n self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.entry_names.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.entry_values = []\n for i in range(0, length):\n val1 = moveit_msgs.msg.AllowedCollisionEntry()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sB'%length\n start = end\n end += struct.calcsize(pattern)\n val1.enabled = struct.unpack(pattern, str[start:end])\n val1.enabled = map(bool, val1.enabled)\n self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.entry_values.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.default_entry_names = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8')\n else:\n val1 = str[start:end]\n self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.default_entry_names.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sB'%length\n start = end\n end += struct.calcsize(pattern)\n self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.default_entry_values = struct.unpack(pattern, str[start:end])\n self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.default_entry_values = map(bool, self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.default_entry_values)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.goal.planning_options.planning_scene_diff.link_padding = []\n for i in range(0, length):\n val1 = moveit_msgs.msg.LinkPadding()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.link_name = str[start:end].decode('utf-8')\n else:\n val1.link_name = str[start:end]\n start = end\n end += 8\n (val1.padding,) = _get_struct_d().unpack(str[start:end])\n self.goal.planning_options.planning_scene_diff.link_padding.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.goal.planning_options.planning_scene_diff.link_scale = []\n for i in range(0, length):\n val1 = 
moveit_msgs.msg.LinkScale()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.link_name = str[start:end].decode('utf-8')\n else:\n val1.link_name = str[start:end]\n start = end\n end += 8\n (val1.scale,) = _get_struct_d().unpack(str[start:end])\n self.goal.planning_options.planning_scene_diff.link_scale.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.goal.planning_options.planning_scene_diff.object_colors = []\n for i in range(0, length):\n val1 = moveit_msgs.msg.ObjectColor()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.id = str[start:end].decode('utf-8')\n else:\n val1.id = str[start:end]\n _v285 = val1.color\n _x = _v285\n start = end\n end += 16\n (_x.r, _x.g, _x.b, _x.a,) = _get_struct_4f().unpack(str[start:end])\n self.goal.planning_options.planning_scene_diff.object_colors.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.goal.planning_options.planning_scene_diff.world.collision_objects = []\n for i in range(0, length):\n val1 = moveit_msgs.msg.CollisionObject()\n _v286 = val1.header\n start = end\n end += 4\n (_v286.seq,) = _get_struct_I().unpack(str[start:end])\n _v287 = _v286.stamp\n _x = _v287\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v286.frame_id = str[start:end].decode('utf-8')\n else:\n _v286.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.id = str[start:end].decode('utf-8')\n else:\n val1.id = str[start:end]\n _v288 = val1.type\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v288.key = str[start:end].decode('utf-8')\n else:\n _v288.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v288.db = str[start:end].decode('utf-8')\n else:\n _v288.db = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.primitives = []\n for i in range(0, length):\n val2 = shape_msgs.msg.SolidPrimitive()\n start = end\n end += 1\n (val2.type,) = _get_struct_B().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n val2.dimensions = struct.unpack(pattern, str[start:end])\n val1.primitives.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.primitive_poses = []\n for i in range(0, length):\n val2 = geometry_msgs.msg.Pose()\n _v289 = val2.position\n _x = _v289\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v290 = val2.orientation\n _x = _v290\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n val1.primitive_poses.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.meshes = []\n for i in range(0, length):\n val2 = shape_msgs.msg.Mesh()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.triangles = []\n for i in range(0, length):\n val3 = shape_msgs.msg.MeshTriangle()\n start = end\n end += 12\n 
val3.vertex_indices = _get_struct_3I().unpack(str[start:end])\n val2.triangles.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.vertices = []\n for i in range(0, length):\n val3 = geometry_msgs.msg.Point()\n _x = val3\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n val2.vertices.append(val3)\n val1.meshes.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.mesh_poses = []\n for i in range(0, length):\n val2 = geometry_msgs.msg.Pose()\n _v291 = val2.position\n _x = _v291\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v292 = val2.orientation\n _x = _v292\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n val1.mesh_poses.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.planes = []\n for i in range(0, length):\n val2 = shape_msgs.msg.Plane()\n start = end\n end += 32\n val2.coef = _get_struct_4d().unpack(str[start:end])\n val1.planes.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.plane_poses = []\n for i in range(0, length):\n val2 = geometry_msgs.msg.Pose()\n _v293 = val2.position\n _x = _v293\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v294 = val2.orientation\n _x = _v294\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n val1.plane_poses.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.subframe_names = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2 = str[start:end].decode('utf-8')\n else:\n val2 = str[start:end]\n val1.subframe_names.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.subframe_poses = []\n for i in range(0, length):\n val2 = geometry_msgs.msg.Pose()\n _v295 = val2.position\n _x = _v295\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v296 = val2.orientation\n _x = _v296\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n val1.subframe_poses.append(val2)\n start = end\n end += 1\n (val1.operation,) = _get_struct_b().unpack(str[start:end])\n self.goal.planning_options.planning_scene_diff.world.collision_objects.append(val1)\n _x = self\n start = end\n end += 12\n (_x.goal.planning_options.planning_scene_diff.world.octomap.header.seq, _x.goal.planning_options.planning_scene_diff.world.octomap.header.stamp.secs, _x.goal.planning_options.planning_scene_diff.world.octomap.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.goal.planning_options.planning_scene_diff.world.octomap.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.goal.planning_options.planning_scene_diff.world.octomap.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 68\n (_x.goal.planning_options.planning_scene_diff.world.octomap.origin.position.x, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.position.y, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.position.z, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.orientation.x, 
_x.goal.planning_options.planning_scene_diff.world.octomap.origin.orientation.y, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.orientation.z, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.orientation.w, _x.goal.planning_options.planning_scene_diff.world.octomap.octomap.header.seq, _x.goal.planning_options.planning_scene_diff.world.octomap.octomap.header.stamp.secs, _x.goal.planning_options.planning_scene_diff.world.octomap.octomap.header.stamp.nsecs,) = _get_struct_7d3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.goal.planning_options.planning_scene_diff.world.octomap.octomap.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.goal.planning_options.planning_scene_diff.world.octomap.octomap.header.frame_id = str[start:end]\n start = end\n end += 1\n (self.goal.planning_options.planning_scene_diff.world.octomap.octomap.binary,) = _get_struct_B().unpack(str[start:end])\n self.goal.planning_options.planning_scene_diff.world.octomap.octomap.binary = bool(self.goal.planning_options.planning_scene_diff.world.octomap.octomap.binary)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.goal.planning_options.planning_scene_diff.world.octomap.octomap.id = str[start:end].decode('utf-8')\n else:\n self.goal.planning_options.planning_scene_diff.world.octomap.octomap.id = str[start:end]\n start = end\n end += 8\n (self.goal.planning_options.planning_scene_diff.world.octomap.octomap.resolution,) = _get_struct_d().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sb'%length\n start = end\n end += struct.calcsize(pattern)\n self.goal.planning_options.planning_scene_diff.world.octomap.octomap.data = struct.unpack(pattern, str[start:end])\n _x = self\n start = end\n end += 28\n (_x.goal.planning_options.planning_scene_diff.is_diff, _x.goal.planning_options.plan_only, _x.goal.planning_options.look_around, _x.goal.planning_options.look_around_attempts, _x.goal.planning_options.max_safe_execution_cost, _x.goal.planning_options.replan, _x.goal.planning_options.replan_attempts, _x.goal.planning_options.replan_delay,) = _get_struct_3BidBid().unpack(str[start:end])\n self.goal.planning_options.planning_scene_diff.is_diff = bool(self.goal.planning_options.planning_scene_diff.is_diff)\n self.goal.planning_options.plan_only = bool(self.goal.planning_options.plan_only)\n self.goal.planning_options.look_around = bool(self.goal.planning_options.look_around)\n self.goal.planning_options.replan = bool(self.goal.planning_options.replan)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.home is None:\n self.home = flyaq.msg.Coordinate()\n if self.movements is None:\n self.movements = None\n if self.move_transitions is None:\n self.move_transitions = None\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.name = str[start:end].decode('utf-8')\n else:\n self.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.type_name = str[start:end].decode('utf-8')\n else:\n self.type_name = str[start:end]\n _x = self\n start = end\n end += 16\n (_x.home.latitude, 
_x.home.longitude, _x.home.altitude, _x.home.heading,) = _struct_4f.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.movements = []\n for i in range(0, length):\n val1 = flyaq.msg.Move()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 1\n (val1.type,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.pre_actions = []\n for i in range(0, length):\n val2 = flyaq.msg.Action()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.name = str[start:end].decode('utf-8')\n else:\n val2.name = str[start:end]\n start = end\n end += 1\n (val2.type,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.action_name = str[start:end].decode('utf-8')\n else:\n val2.action_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.parameters = []\n for i in range(0, length):\n val3 = flyaq.msg.Parameter()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.key = str[start:end].decode('utf-8')\n else:\n val3.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.value = str[start:end].decode('utf-8')\n else:\n val3.value = str[start:end]\n val2.parameters.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.slot_name = str[start:end].decode('utf-8')\n else:\n val2.slot_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.receivers_name = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3 = str[start:end].decode('utf-8')\n else:\n val3 = str[start:end]\n val2.receivers_name.append(val3)\n val1.pre_actions.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.post_actions = []\n for i in range(0, length):\n val2 = flyaq.msg.Action()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.name = str[start:end].decode('utf-8')\n else:\n val2.name = str[start:end]\n start = end\n end += 1\n (val2.type,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.action_name = str[start:end].decode('utf-8')\n else:\n val2.action_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.parameters = []\n for i in range(0, length):\n val3 = flyaq.msg.Parameter()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.key = str[start:end].decode('utf-8')\n else:\n val3.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.value = str[start:end].decode('utf-8')\n else:\n val3.value = str[start:end]\n val2.parameters.append(val3)\n start = 
end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.slot_name = str[start:end].decode('utf-8')\n else:\n val2.slot_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.receivers_name = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3 = str[start:end].decode('utf-8')\n else:\n val3 = str[start:end]\n val2.receivers_name.append(val3)\n val1.post_actions.append(val2)\n start = end\n end += 4\n (val1.altitude,) = _struct_f.unpack(str[start:end])\n _v7 = val1.target_position\n _x = _v7\n start = end\n end += 16\n (_x.latitude, _x.longitude, _x.altitude, _x.heading,) = _struct_4f.unpack(str[start:end])\n start = end\n end += 1\n (val1.strategy,) = _struct_b.unpack(str[start:end])\n _v8 = val1.duration\n _x = _v8\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2i.unpack(str[start:end])\n _x = val1\n start = end\n end += 13\n (_x.radius, _x.circle_altitude, _x.clockwise, _x.direction,) = _struct_2fBf.unpack(str[start:end])\n val1.clockwise = bool(val1.clockwise)\n self.movements.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.move_transitions = []\n for i in range(0, length):\n val1 = flyaq.msg.MoveTransition()\n start = end\n end += 1\n (val1.is_choice,) = _struct_B.unpack(str[start:end])\n val1.is_choice = bool(val1.is_choice)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.wait_for_slot_name = str[start:end].decode('utf-8')\n else:\n val1.wait_for_slot_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.from_move_name = str[start:end].decode('utf-8')\n else:\n val1.from_move_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.to_move_name = str[start:end].decode('utf-8')\n else:\n val1.to_move_name = str[start:end]\n start = end\n end += 1\n (val1.fluid,) = _struct_B.unpack(str[start:end])\n val1.fluid = bool(val1.fluid)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.condition_identifier = str[start:end].decode('utf-8')\n else:\n val1.condition_identifier = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.false_branch_move_name = str[start:end].decode('utf-8')\n else:\n val1.false_branch_move_name = str[start:end]\n self.move_transitions.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.slot_names = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8')\n else:\n val1 = str[start:end]\n self.slot_names.append(val1)\n start = end\n end += 1\n (self.travel_mode,) = _struct_b.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.planning_scene is None:\n self.planning_scene = arm_navigation_msgs.msg.PlanningScene()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.planning_scene.robot_state.joint_state.header.seq, 
_x.planning_scene.robot_state.joint_state.header.stamp.secs, _x.planning_scene.robot_state.joint_state.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.planning_scene.robot_state.joint_state.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.planning_scene.robot_state.joint_state.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene.robot_state.joint_state.name = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8')\n else:\n val1 = str[start:end]\n self.planning_scene.robot_state.joint_state.name.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.planning_scene.robot_state.joint_state.position = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.planning_scene.robot_state.joint_state.velocity = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.planning_scene.robot_state.joint_state.effort = struct.unpack(pattern, str[start:end])\n _x = self\n start = end\n end += 8\n (_x.planning_scene.robot_state.multi_dof_joint_state.stamp.secs, _x.planning_scene.robot_state.multi_dof_joint_state.stamp.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene.robot_state.multi_dof_joint_state.joint_names = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8')\n else:\n val1 = str[start:end]\n self.planning_scene.robot_state.multi_dof_joint_state.joint_names.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene.robot_state.multi_dof_joint_state.frame_ids = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8')\n else:\n val1 = str[start:end]\n self.planning_scene.robot_state.multi_dof_joint_state.frame_ids.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene.robot_state.multi_dof_joint_state.child_frame_ids = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8')\n else:\n val1 = str[start:end]\n self.planning_scene.robot_state.multi_dof_joint_state.child_frame_ids.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene.robot_state.multi_dof_joint_state.poses = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v141 = val1.position\n _x = _v141\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v142 = val1.orientation\n _x = _v142\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = 
_struct_4d.unpack(str[start:end])\n self.planning_scene.robot_state.multi_dof_joint_state.poses.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene.fixed_frame_transforms = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.TransformStamped()\n _v143 = val1.header\n start = end\n end += 4\n (_v143.seq,) = _struct_I.unpack(str[start:end])\n _v144 = _v143.stamp\n _x = _v144\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v143.frame_id = str[start:end].decode('utf-8')\n else:\n _v143.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.child_frame_id = str[start:end].decode('utf-8')\n else:\n val1.child_frame_id = str[start:end]\n _v145 = val1.transform\n _v146 = _v145.translation\n _x = _v146\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v147 = _v145.rotation\n _x = _v147\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.planning_scene.fixed_frame_transforms.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene.allowed_collision_matrix.link_names = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8')\n else:\n val1 = str[start:end]\n self.planning_scene.allowed_collision_matrix.link_names.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene.allowed_collision_matrix.entries = []\n for i in range(0, length):\n val1 = arm_navigation_msgs.msg.AllowedCollisionEntry()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sB'%length\n start = end\n end += struct.calcsize(pattern)\n val1.enabled = struct.unpack(pattern, str[start:end])\n val1.enabled = map(bool, val1.enabled)\n self.planning_scene.allowed_collision_matrix.entries.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene.allowed_contacts = []\n for i in range(0, length):\n val1 = arm_navigation_msgs.msg.AllowedContactSpecification()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n _v148 = val1.shape\n start = end\n end += 1\n (_v148.type,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n _v148.dimensions = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n end += struct.calcsize(pattern)\n _v148.triangles = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v148.vertices = []\n for i in range(0, length):\n val3 = geometry_msgs.msg.Point()\n _x = val3\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v148.vertices.append(val3)\n _v149 = val1.pose_stamped\n _v150 = _v149.header\n start = end\n end += 4\n (_v150.seq,) = 
_struct_I.unpack(str[start:end])\n _v151 = _v150.stamp\n _x = _v151\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v150.frame_id = str[start:end].decode('utf-8')\n else:\n _v150.frame_id = str[start:end]\n _v152 = _v149.pose\n _v153 = _v152.position\n _x = _v153\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v154 = _v152.orientation\n _x = _v154\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.link_names = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2 = str[start:end].decode('utf-8')\n else:\n val2 = str[start:end]\n val1.link_names.append(val2)\n start = end\n end += 8\n (val1.penetration_depth,) = _struct_d.unpack(str[start:end])\n self.planning_scene.allowed_contacts.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene.link_padding = []\n for i in range(0, length):\n val1 = arm_navigation_msgs.msg.LinkPadding()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.link_name = str[start:end].decode('utf-8')\n else:\n val1.link_name = str[start:end]\n start = end\n end += 8\n (val1.padding,) = _struct_d.unpack(str[start:end])\n self.planning_scene.link_padding.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene.collision_objects = []\n for i in range(0, length):\n val1 = arm_navigation_msgs.msg.CollisionObject()\n _v155 = val1.header\n start = end\n end += 4\n (_v155.seq,) = _struct_I.unpack(str[start:end])\n _v156 = _v155.stamp\n _x = _v156\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v155.frame_id = str[start:end].decode('utf-8')\n else:\n _v155.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.id = str[start:end].decode('utf-8')\n else:\n val1.id = str[start:end]\n start = end\n end += 4\n (val1.padding,) = _struct_f.unpack(str[start:end])\n _v157 = val1.operation\n start = end\n end += 1\n (_v157.operation,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.shapes = []\n for i in range(0, length):\n val2 = arm_navigation_msgs.msg.Shape()\n start = end\n end += 1\n (val2.type,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n val2.dimensions = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n end += struct.calcsize(pattern)\n val2.triangles = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.vertices = []\n for i in range(0, length):\n val3 = geometry_msgs.msg.Point()\n _x = val3\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n 
val2.vertices.append(val3)\n val1.shapes.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.poses = []\n for i in range(0, length):\n val2 = geometry_msgs.msg.Pose()\n _v158 = val2.position\n _x = _v158\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v159 = val2.orientation\n _x = _v159\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n val1.poses.append(val2)\n self.planning_scene.collision_objects.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene.attached_collision_objects = []\n for i in range(0, length):\n val1 = arm_navigation_msgs.msg.AttachedCollisionObject()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.link_name = str[start:end].decode('utf-8')\n else:\n val1.link_name = str[start:end]\n _v160 = val1.object\n _v161 = _v160.header\n start = end\n end += 4\n (_v161.seq,) = _struct_I.unpack(str[start:end])\n _v162 = _v161.stamp\n _x = _v162\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v161.frame_id = str[start:end].decode('utf-8')\n else:\n _v161.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v160.id = str[start:end].decode('utf-8')\n else:\n _v160.id = str[start:end]\n start = end\n end += 4\n (_v160.padding,) = _struct_f.unpack(str[start:end])\n _v163 = _v160.operation\n start = end\n end += 1\n (_v163.operation,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v160.shapes = []\n for i in range(0, length):\n val3 = arm_navigation_msgs.msg.Shape()\n start = end\n end += 1\n (val3.type,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n val3.dimensions = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n end += struct.calcsize(pattern)\n val3.triangles = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val3.vertices = []\n for i in range(0, length):\n val4 = geometry_msgs.msg.Point()\n _x = val4\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n val3.vertices.append(val4)\n _v160.shapes.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v160.poses = []\n for i in range(0, length):\n val3 = geometry_msgs.msg.Pose()\n _v164 = val3.position\n _x = _v164\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v165 = val3.orientation\n _x = _v165\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n _v160.poses.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.touch_links = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2 = str[start:end].decode('utf-8')\n else:\n val2 = str[start:end]\n val1.touch_links.append(val2)\n 
self.planning_scene.attached_collision_objects.append(val1)\n _x = self\n start = end\n end += 12\n (_x.planning_scene.collision_map.header.seq, _x.planning_scene.collision_map.header.stamp.secs, _x.planning_scene.collision_map.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.planning_scene.collision_map.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.planning_scene.collision_map.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene.collision_map.boxes = []\n for i in range(0, length):\n val1 = arm_navigation_msgs.msg.OrientedBoundingBox()\n _v166 = val1.center\n _x = _v166\n start = end\n end += 12\n (_x.x, _x.y, _x.z,) = _struct_3f.unpack(str[start:end])\n _v167 = val1.extents\n _x = _v167\n start = end\n end += 12\n (_x.x, _x.y, _x.z,) = _struct_3f.unpack(str[start:end])\n _v168 = val1.axis\n _x = _v168\n start = end\n end += 12\n (_x.x, _x.y, _x.z,) = _struct_3f.unpack(str[start:end])\n start = end\n end += 4\n (val1.angle,) = _struct_f.unpack(str[start:end])\n self.planning_scene.collision_map.boxes.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def _sequence(game_record):\n seq = []\n for item in game_record.get_main_sequence():\n color, move = item.get_move()\n # color == None is entries that are not actual game play\n # move == None is a pass, which in theory we could try to\n # predict, but not yet\n if color is not None and move is not None:\n seq.append((color, move))\n return seq", "def parse(input_):\n map_ = np.array([[char for char in line] for line in input_.splitlines()])\n units = []\n for unit_type in (Goblin, Elf):\n for pos in np.argwhere(map_ == unit_type.char):\n units.append(unit_type(tuple(pos), map_))\n map_[pos[0], pos[1]] = \".\"\n # make map_ immutable\n map_.flags.writeable = False\n return units", "def parse_phase (self, ph):\n\n l = ph.split ()\n dist = l[0] # in degrees\n name = l[2]\n time = l[3]\n\n #print (time)\n\n return [name, time, dist]", "def parse(m: tuple):\n directions = ['across', 'down']\n word = list() # Used to store the word we're working on\n word_list = list() # Stores the growing list of words\n\n for direction in directions:\n\n for y in range(len(m)): # number of rows (size of outer tuple)\n # word = list() # todo: can probably be deleted as word is reset at end of row already\n\n for x in range(len(m[0])): # number of columns (size of inner tuple)\n\n if direction == 'down': # flips so x is rows and y is columns if we are reading down\n x, y = y, x\n\n square = matrix[y][x]\n\n if square != 0: # Encountered a letter\n if len(word) == 0:\n word.append((direction, y, x), ) # If start of a word, start with direction & coordinates\n word.append(square) # First letter\n\n if direction == 'down': # flips x and y back before we reach the loop tests\n x, y = y, x\n\n if square == 0 or x == 10: # Encountered a blank space\n if len(word) > 3: # Then we collected a word and have come to the end of it\n word_list.append(word) # Add it to the list of words\n word = list() # Reset word\n\n word = list()\n\n return word_list", "def read_action(line):\n try:\n reg_action = re.search(r'(.+): ([a-z]+)', line)\n player_pseudo = reg_action.group(1)\n action_type = define_action(reg_action.group(2))\n if action_type == ActionType.CHECK:\n return [player_pseudo, 
action_type, 0]\n elif action_type == ActionType.FOLD:\n return [player_pseudo, action_type, 0]\n elif action_type == ActionType.CALL:\n amount = float(re.search(' €?([0-9-.]+)', line).group(1))\n return [player_pseudo, action_type, amount]\n elif action_type == ActionType.BET:\n amount = float(re.search(' €?([0-9-.]+)', line).group(1))\n return [player_pseudo, action_type, amount]\n elif action_type == ActionType.RAISE:\n amount = float(re.search('to €?([0-9-.]+)', line).group(1))\n return [player_pseudo, action_type, amount]\n else:\n return [None, None, None]\n\n except AttributeError:\n pass", "def deserialize(self, str):\n try:\n if self.planning_scene_diff is None:\n self.planning_scene_diff = arm_navigation_msgs.msg.PlanningScene()\n if self.operations is None:\n self.operations = arm_navigation_msgs.msg.OrderedCollisionOperations()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.planning_scene_diff.robot_state.joint_state.header.seq, _x.planning_scene_diff.robot_state.joint_state.header.stamp.secs, _x.planning_scene_diff.robot_state.joint_state.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.planning_scene_diff.robot_state.joint_state.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.planning_scene_diff.robot_state.joint_state.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene_diff.robot_state.joint_state.name = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8')\n else:\n val1 = str[start:end]\n self.planning_scene_diff.robot_state.joint_state.name.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.planning_scene_diff.robot_state.joint_state.position = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.planning_scene_diff.robot_state.joint_state.velocity = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.planning_scene_diff.robot_state.joint_state.effort = struct.unpack(pattern, str[start:end])\n _x = self\n start = end\n end += 8\n (_x.planning_scene_diff.robot_state.multi_dof_joint_state.stamp.secs, _x.planning_scene_diff.robot_state.multi_dof_joint_state.stamp.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene_diff.robot_state.multi_dof_joint_state.joint_names = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8')\n else:\n val1 = str[start:end]\n self.planning_scene_diff.robot_state.multi_dof_joint_state.joint_names.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene_diff.robot_state.multi_dof_joint_state.frame_ids = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if 
python3:\n val1 = str[start:end].decode('utf-8')\n else:\n val1 = str[start:end]\n self.planning_scene_diff.robot_state.multi_dof_joint_state.frame_ids.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene_diff.robot_state.multi_dof_joint_state.child_frame_ids = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8')\n else:\n val1 = str[start:end]\n self.planning_scene_diff.robot_state.multi_dof_joint_state.child_frame_ids.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene_diff.robot_state.multi_dof_joint_state.poses = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v29 = val1.position\n _x = _v29\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v30 = val1.orientation\n _x = _v30\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.planning_scene_diff.robot_state.multi_dof_joint_state.poses.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene_diff.fixed_frame_transforms = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.TransformStamped()\n _v31 = val1.header\n start = end\n end += 4\n (_v31.seq,) = _struct_I.unpack(str[start:end])\n _v32 = _v31.stamp\n _x = _v32\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v31.frame_id = str[start:end].decode('utf-8')\n else:\n _v31.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.child_frame_id = str[start:end].decode('utf-8')\n else:\n val1.child_frame_id = str[start:end]\n _v33 = val1.transform\n _v34 = _v33.translation\n _x = _v34\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v35 = _v33.rotation\n _x = _v35\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.planning_scene_diff.fixed_frame_transforms.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene_diff.allowed_collision_matrix.link_names = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8')\n else:\n val1 = str[start:end]\n self.planning_scene_diff.allowed_collision_matrix.link_names.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene_diff.allowed_collision_matrix.entries = []\n for i in range(0, length):\n val1 = arm_navigation_msgs.msg.AllowedCollisionEntry()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sB'%length\n start = end\n end += struct.calcsize(pattern)\n val1.enabled = struct.unpack(pattern, str[start:end])\n val1.enabled = map(bool, val1.enabled)\n self.planning_scene_diff.allowed_collision_matrix.entries.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene_diff.allowed_contacts = []\n for i in range(0, length):\n val1 = arm_navigation_msgs.msg.AllowedContactSpecification()\n start = end\n end += 4\n (length,) = 
_struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n _v36 = val1.shape\n start = end\n end += 1\n (_v36.type,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n _v36.dimensions = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n end += struct.calcsize(pattern)\n _v36.triangles = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v36.vertices = []\n for i in range(0, length):\n val3 = geometry_msgs.msg.Point()\n _x = val3\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v36.vertices.append(val3)\n _v37 = val1.pose_stamped\n _v38 = _v37.header\n start = end\n end += 4\n (_v38.seq,) = _struct_I.unpack(str[start:end])\n _v39 = _v38.stamp\n _x = _v39\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v38.frame_id = str[start:end].decode('utf-8')\n else:\n _v38.frame_id = str[start:end]\n _v40 = _v37.pose\n _v41 = _v40.position\n _x = _v41\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v42 = _v40.orientation\n _x = _v42\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.link_names = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2 = str[start:end].decode('utf-8')\n else:\n val2 = str[start:end]\n val1.link_names.append(val2)\n start = end\n end += 8\n (val1.penetration_depth,) = _struct_d.unpack(str[start:end])\n self.planning_scene_diff.allowed_contacts.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene_diff.link_padding = []\n for i in range(0, length):\n val1 = arm_navigation_msgs.msg.LinkPadding()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.link_name = str[start:end].decode('utf-8')\n else:\n val1.link_name = str[start:end]\n start = end\n end += 8\n (val1.padding,) = _struct_d.unpack(str[start:end])\n self.planning_scene_diff.link_padding.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene_diff.collision_objects = []\n for i in range(0, length):\n val1 = arm_navigation_msgs.msg.CollisionObject()\n _v43 = val1.header\n start = end\n end += 4\n (_v43.seq,) = _struct_I.unpack(str[start:end])\n _v44 = _v43.stamp\n _x = _v44\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v43.frame_id = str[start:end].decode('utf-8')\n else:\n _v43.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.id = str[start:end].decode('utf-8')\n else:\n val1.id = str[start:end]\n start = end\n end += 4\n (val1.padding,) = 
_struct_f.unpack(str[start:end])\n _v45 = val1.operation\n start = end\n end += 1\n (_v45.operation,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.shapes = []\n for i in range(0, length):\n val2 = arm_navigation_msgs.msg.Shape()\n start = end\n end += 1\n (val2.type,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n val2.dimensions = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n end += struct.calcsize(pattern)\n val2.triangles = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.vertices = []\n for i in range(0, length):\n val3 = geometry_msgs.msg.Point()\n _x = val3\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n val2.vertices.append(val3)\n val1.shapes.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.poses = []\n for i in range(0, length):\n val2 = geometry_msgs.msg.Pose()\n _v46 = val2.position\n _x = _v46\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v47 = val2.orientation\n _x = _v47\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n val1.poses.append(val2)\n self.planning_scene_diff.collision_objects.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene_diff.attached_collision_objects = []\n for i in range(0, length):\n val1 = arm_navigation_msgs.msg.AttachedCollisionObject()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.link_name = str[start:end].decode('utf-8')\n else:\n val1.link_name = str[start:end]\n _v48 = val1.object\n _v49 = _v48.header\n start = end\n end += 4\n (_v49.seq,) = _struct_I.unpack(str[start:end])\n _v50 = _v49.stamp\n _x = _v50\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v49.frame_id = str[start:end].decode('utf-8')\n else:\n _v49.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v48.id = str[start:end].decode('utf-8')\n else:\n _v48.id = str[start:end]\n start = end\n end += 4\n (_v48.padding,) = _struct_f.unpack(str[start:end])\n _v51 = _v48.operation\n start = end\n end += 1\n (_v51.operation,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v48.shapes = []\n for i in range(0, length):\n val3 = arm_navigation_msgs.msg.Shape()\n start = end\n end += 1\n (val3.type,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n val3.dimensions = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n end += struct.calcsize(pattern)\n val3.triangles = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val3.vertices = 
[]\n for i in range(0, length):\n val4 = geometry_msgs.msg.Point()\n _x = val4\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n val3.vertices.append(val4)\n _v48.shapes.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v48.poses = []\n for i in range(0, length):\n val3 = geometry_msgs.msg.Pose()\n _v52 = val3.position\n _x = _v52\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v53 = val3.orientation\n _x = _v53\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n _v48.poses.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.touch_links = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2 = str[start:end].decode('utf-8')\n else:\n val2 = str[start:end]\n val1.touch_links.append(val2)\n self.planning_scene_diff.attached_collision_objects.append(val1)\n _x = self\n start = end\n end += 12\n (_x.planning_scene_diff.collision_map.header.seq, _x.planning_scene_diff.collision_map.header.stamp.secs, _x.planning_scene_diff.collision_map.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.planning_scene_diff.collision_map.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.planning_scene_diff.collision_map.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene_diff.collision_map.boxes = []\n for i in range(0, length):\n val1 = arm_navigation_msgs.msg.OrientedBoundingBox()\n _v54 = val1.center\n _x = _v54\n start = end\n end += 12\n (_x.x, _x.y, _x.z,) = _struct_3f.unpack(str[start:end])\n _v55 = val1.extents\n _x = _v55\n start = end\n end += 12\n (_x.x, _x.y, _x.z,) = _struct_3f.unpack(str[start:end])\n _v56 = val1.axis\n _x = _v56\n start = end\n end += 12\n (_x.x, _x.y, _x.z,) = _struct_3f.unpack(str[start:end])\n start = end\n end += 4\n (val1.angle,) = _struct_f.unpack(str[start:end])\n self.planning_scene_diff.collision_map.boxes.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.operations.collision_operations = []\n for i in range(0, length):\n val1 = arm_navigation_msgs.msg.CollisionOperation()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.object1 = str[start:end].decode('utf-8')\n else:\n val1.object1 = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.object2 = str[start:end].decode('utf-8')\n else:\n val1.object2 = str[start:end]\n _x = val1\n start = end\n end += 12\n (_x.penetration_distance, _x.operation,) = _struct_di.unpack(str[start:end])\n self.operations.collision_operations.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_pieces(self):\n # pieces_dict = {}\n self.gen = PieceGenerator(self)\n for piece_string in self.pieces_string.split(','):\n piece_dict = dict(zip(self.piece_cols, piece_string.split('/')))\n piece_dict[\"move_pattern\"] = [int(num) for num in piece_dict[\"move_pattern\"]]\n piece_dict[\"jump_pattern\"] = [int(num) for num in piece_dict[\"jump_pattern\"]]\n 
piece_dict[\"lose_on_capture\"] = piece_dict[\"lose_on_capture\"].isupper()\n self.gen.add_piece(piece_dict)\n # pieces_dict[piece_string[0]] = PieceGenerator(self, piece_string)\n # self.pieces_dict = pieces_dict" ]
[ "0.62569845", "0.6113737", "0.60215795", "0.6006309", "0.5862436", "0.5765197", "0.5751924", "0.5724444", "0.5700855", "0.5651036", "0.562076", "0.5336469", "0.52793014", "0.5259579", "0.5231833", "0.51468825", "0.5125433", "0.5125381", "0.5124578", "0.5118541", "0.51147455", "0.50872326", "0.5067744", "0.5064953", "0.5042147", "0.5012534", "0.50087965", "0.5005631", "0.49894875", "0.49735954" ]
0.7860054
0
Simulates a smooth mouse drag
def drag(source, dest, speed=1000):
    m = PyMouse()
    m.press(*source)
    time.sleep(0.1)
    # number of intermediate movements to make for our given speed
    npoints = int(sqrt((dest[0]-source[0])**2 + (dest[1]-source[1])**2) / (speed/1000))
    for i in range(npoints):
        x = int(source[0] + ((dest[0]-source[0])/npoints)*i)
        y = int(source[1] + ((dest[1]-source[1])/npoints)*i)
        m.move(x, y)
        time.sleep(0.001)
    m.release(*dest)
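A minimal usage sketch, not part of the record itself: it assumes PyMouse comes from the pymouse package (PyUserInput) and that time and math.sqrt are available where drag is defined.

    from pymouse import PyMouse
    from math import sqrt
    import time

    # drag from near the top-left corner to (500, 300) at the default speed
    drag((10, 10), (500, 300))
    # a larger speed divides the distance by a bigger factor, so fewer
    # intermediate moves are made and the drag completes faster
    drag((10, 10), (500, 300), speed=2000)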
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mouseDragged():\n if mousePressed:\n mousePressed()", "def mouseDragged(self, point, delta):\n pass", "def _on_mouse(self, event):\n x, y = event.GetPosition()\n if self._drag_mode == DepthCanvas.SASH_DRAG_NONE: \n self._canvas_hit_test(x, y) \n if event.LeftDown():\n self.start_dragging(y)\n elif self._drag_mode == DepthCanvas.SASH_DRAG_DRAGGING:\n if event.LeftIsDown():\n self.drag_it(y) \n elif event.LeftUp():\n self.end_dragging()\n event.Skip()", "def on_mouse_movement(self, event: wx.MouseEvent) -> None:\n if not event.Dragging():\n self._drag_start_pos = None\n return\n # self.CaptureMouse()\n if self._drag_start_pos is None:\n self._drag_start_pos = event.GetPosition()\n else:\n current_pos = event.GetPosition()\n change = self._drag_start_pos - current_pos\n self.SetPosition(self.GetPosition() - change)", "def on_mouse_movement(self, event: wx.MouseEvent) -> None:\n if not event.Dragging():\n self._drag_start_pos = None\n return\n # self.CaptureMouse()\n if self._drag_start_pos is None:\n self._drag_start_pos = event.GetPosition()\n else:\n current_pos = event.GetPosition()\n change = self._drag_start_pos - current_pos\n self.SetPosition(self.GetPosition() - change)", "def drag(self, event):\n if event.button:\n try:\n x_loc, y_loc = self.appWindow.spec_cv.mouse(event)\n print(x_loc, y_loc)\n trackNo, updated_track =\\\n self.model.updateTrackDrag(x_loc, y_loc,\\\n self.locked_track, self.x_high)\n self.appWindow.spec_cv.updateTrack(trackNo, updated_track)\n self.appWindow.spec_cv.redrawTracks()\n except TypeError:\n pass", "def drag_motion(self, widget, context, x, y, t):\n \n if self.mouse_click_point:\n self.dy = y - self.mouse_click_point\n else:\n self.mouse_click_point = y", "def drag(self, event):\n self.yview('scroll', self.ypos - event.y, 'units')\n self.xview('scroll', self.xpos - event.x, 'units')\n self.ypos = event.y\n self.xpos = event.x", "def OnMouse(self, event):\n if not event.Dragging():\n self._dragPos = None\n if self.HasCapture():\n self.ReleaseMouse()\n return\n else:\n if not self.HasCapture():\n self.CaptureMouse()\n\n if not self._dragPos:\n self._dragPos = event.GetPosition()\n else:\n pos = event.GetPosition()\n displacement = self._dragPos - pos\n self.SetPosition(self.GetPosition() - displacement)", "def on_mouse_move(self, win, xpos, ypos):\n old = self.mouse\n self.mouse = (xpos, glfw.get_window_size(win)[1] - ypos)\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_LEFT):\n self.drag(old, self.mouse, glfw.get_window_size(win))\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_RIGHT):\n self.pan(old, self.mouse)", "def on_mouse_move(self, win, xpos, ypos):\n old = self.mouse\n self.mouse = (xpos, glfw.get_window_size(win)[1] - ypos)\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_LEFT):\n self.drag(old, self.mouse, glfw.get_window_size(win))\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_RIGHT):\n self.pan(old, self.mouse)", "def drag(self,x,y):\n self.x=x\n self.y=y", "def mouseMoveEvent(self, e):\n if self.mousePressed:\n Globals.dragObject = QTextDrag('PKSampler: dragging a track', self)\n Globals.dragObject.trackFrame = self\n Globals.dragObject.dragCopy()", "def ev_mousemotion(self, event: MouseMotion) -> None:", "def ClickAndDrag(self, delta_x=0, delta_y=0):\n self._EnsureHIDValueInRange(delta_x)\n self._EnsureHIDValueInRange(delta_y)\n self._PressLeftButton()\n self.Move(delta_x, delta_y)\n self._ReleaseAllButtons()", "def OnMouseMotion(self, evt):\n if evt.Dragging() and evt.LeftIsDown():\n self.lastx, self.lasty = self.x, 
self.y\n self.x, self.y = evt.GetPosition()\n self.Refresh(False)", "def doubleclick(point):\n m = PyMouse()\n m.press(*point)\n m.release(*point)\n m.press(*point)\n m.release(*point)", "def move_move(self, event):\n self.canvas.scan_dragto(event.x, event.y, gain=1)", "def ev_MOUSEMOTION(self, event):", "def mouse_motion(self, dx, dy):\n dx /= 8; dy /= 8\n self.rot[0] += dy; self.rot[1] -= dx\n if self.rot[0] > 90: self.rot[0] = 90\n elif self.rot[0] < -90: self.rot[0] = -90", "def mouse_move(self, obj, event):\n last_pos = self.iren.GetLastEventPosition()\n next_pos = self.iren.GetEventPosition()\n last_disp_coords = np.asarray([last_pos[0], last_pos[1], 0])\n next_disp_coords = np.asarray([next_pos[0], next_pos[1], 0])\n last_world_coords = self.display_to_world(last_disp_coords)\n next_world_coords = self.display_to_world(next_disp_coords)\n world_direction = (last_world_coords - next_world_coords)[0]\n\n if world_direction > 0:\n direction = 'forwards'\n elif world_direction < 0:\n direction = 'backwards'\n else:\n direction = 'none'\n\n if self.cone_dir == 'start':\n if direction == 'backwards':\n self.start_base_x += .5\n if self.start_base_x.is_integer():\n ind = str(int(self.start_base_x))\n isvalid = self.gaps.set_dragged_start(ind)\n if isvalid:\n self.ren_win.Render()\n else:\n self.start_base_x -= .5\n return\n\n elif direction == 'forwards':\n if self.start_base_x > 0:\n self.start_base_x -= .5\n if self.start_base_x.is_integer():\n ind = str(int(self.start_base_x))\n self.gaps.set_dragged_start(ind)\n self.ren_win.Render()\n\n if self.cone_dir == 'end':\n if direction == 'backwards':\n if self.end_base_x > 0:\n self.end_base_x -= .5\n if self.end_base_x.is_integer():\n ind = str(int(self.end_base_x))\n self.gaps.set_dragged_end(ind)\n self.ren_win.Render()\n\n elif direction == 'forwards':\n self.end_base_x += .5\n if self.end_base_x.is_integer():\n ind = str(int(self.end_base_x))\n isvalid = self.gaps.set_dragged_end(ind)\n if isvalid:\n self.ren_win.Render()\n else:\n self.end_base_x -= .5\n return", "def cmdMouse(self, dev):\n self.hitsMouses[dev] = False\n f = open(self.inputPath + dev, 'rb')\n while self.live:\n f.read(500) # 144 kan eigenlijk alles zijn, behalve absurbt hoge waarden..\n self.hitsMouses[dev] = True\n time.sleep(0.1)", "def dnd_motion(self, source, event):", "def game_click(coord):\n mouseclick(coord[0], coord[1])\n time.sleep(0.5)", "def sling_action():\n global mouse_distance\n global rope_lenght\n global angle\n global x_mouse\n global y_mouse\n # Fixing bird to the sling rope\n v = vector((sling_x, sling_y), (x_mouse, y_mouse))\n uv = unit_vector(v)\n uv1 = uv[0]\n uv2 = uv[1]\n # mouse_distance = distance(sling_x, sling_y, x_mouse, y_mouse)\n sling = Vec2d(sling_x, sling_y)\n mouse = Vec2d(x_mouse, y_mouse)\n mouse_distance = (sling - mouse).length\n\n pu = (uv1*rope_lenght+sling_x, uv2*rope_lenght+sling_y)\n bigger_rope = 102\n x_redbird = x_mouse - 20\n y_redbird = y_mouse - 20\n if mouse_distance > rope_lenght:\n pux, puy = pu\n pux -= 20\n puy -= 20\n pul = pux, puy\n screen.blit(redbird, pul)\n pu2 = (uv1*bigger_rope+sling_x, uv2*bigger_rope+sling_y)\n pygame.draw.line(screen, (0, 0, 0), (sling2_x, sling2_y), pu2, 5)\n screen.blit(redbird, pul)\n pygame.draw.line(screen, (0, 0, 0), (sling_x, sling_y), pu2, 5)\n else:\n mouse_distance += 10\n pu3 = (uv1*mouse_distance+sling_x, uv2*mouse_distance+sling_y)\n pygame.draw.line(screen, (0, 0, 0), (sling2_x, sling2_y), pu3, 5)\n screen.blit(redbird, (x_redbird, y_redbird))\n pygame.draw.line(screen, 
(0, 0, 0), (sling_x, sling_y), pu3, 5)\n # Angle of impulse\n dy = y_mouse - sling_y\n dx = x_mouse - sling_x\n if dx == 0:\n dx = 0.00000000000001\n angle = math.atan((float(dy))/dx)", "def slider_dragged(self):\n pass", "def move_mouse(kf_x, m, img): \n exponent = 1.6\n x, y, x_vel, y_vel = (int(kf_x[0]), int(kf_x[1]), kf_x[2], kf_x[3])\n mx, my = m.position()\n win_height, win_width, channel = img.shape\n x_screen, y_screen = m.screen_size()\n min_x, max_x = 0, x_screen\n min_y, max_y = 0, y_screen \n\n #Calculations\n speed = np.sqrt(x_vel**2 + y_vel**2) \n power = math.pow(speed, exponent) \n ratio = speed / power\n theta = math.atan2(y_vel, x_vel) \n x_comp = power * math.cos(theta) \n y_comp = power * math.sin(theta) \n xf, yf = mx + x_comp, my + y_comp\n\n if xf < min_x: \n xf = min_x\n elif xf > max_x: \n xf = max_x\n elif yf < min_y: \n yf = min_y\n elif yf > max_y: \n yf = max_y\n m.move(xf, yf)\n return speed", "def move_the_mouse():\n # Get the screen size\n screen_width, screen_height = pyautogui.size()\n # Move the mouse in a rectange shape\n pyautogui.moveTo(60, 60, duration=0.50)\n pyautogui.moveTo(screen_width - 60, 60, duration=0.50)\n pyautogui.moveTo(screen_width - 60, screen_height - 60, duration=0.50)\n pyautogui.moveTo(60, screen_height - 60, duration=0.50)", "def OnMouseDown(self, evt):\n self.CaptureMouse()\n self.x, self.y = self.lastx, self.lasty = evt.GetPosition()", "def on_mouse_move(self, event):\n if event.is_dragging and event.buttons[0] == 1:\n x0, y0 = event.last_event.pos[0], event.last_event.pos[1]\n x1, y1 = event.pos[0], event.pos[1]\n X0, Y0, Z0 = self.pixel_to_coords(float(x0), float(y0))\n X1, Y1, Z1 = self.pixel_to_coords(float(x1), float(y1))\n self.translate_center(X1 - X0, Y1 - Y0, Z1 - Z0)" ]
[ "0.6801119", "0.664212", "0.6326401", "0.6171946", "0.6171946", "0.6129287", "0.6114349", "0.5991088", "0.5924613", "0.58440715", "0.58440715", "0.58381003", "0.58319557", "0.58042", "0.5794935", "0.57604647", "0.57600874", "0.5743456", "0.57369435", "0.572792", "0.5670328", "0.5652627", "0.5652511", "0.56517774", "0.56449383", "0.5627536", "0.5590589", "0.5582476", "0.5578193", "0.5576742" ]
0.6645221
1
Simulates a mouse double click
def doubleclick(point):
    m = PyMouse()
    m.press(*point)
    m.release(*point)
    m.press(*point)
    m.release(*point)
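For context (hedged, since the record only shows paired press/release calls): PyUserInput's PyMouse also exposes a click method with a repeat count, so an equivalent one-liner is possible.

    from pymouse import PyMouse

    m = PyMouse()
    # equivalent double click at (200, 150); n=2 is PyMouse's repeat count
    m.click(200, 150, n=2)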
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mouse_click(self,x,y,button,double_click):\n raise NotImplementedError(\"ERROR: Unimplemented function.\")", "def double_click(self, *args):\n return _ida_hexrays.Hexrays_Hooks_double_click(self, *args)", "def double_clicked(mouse):\n global state, current_action\n\n smallest_element = get_element(mouse)\n\n with data_lock:\n if smallest_element:\n state = 0\n current_action = wtl.actions.Click(wtl.Selector(f'[wtl-uid=\"{smallest_element.wtl_uid}\"]'))", "def double_click(button='left', coords=(0, 0)):\n _perform_click_input(button=button, coords=coords, double=True)", "def double_click(dsk_session: WebDriver, element: WebElement):\n actions = ActionChains(dsk_session)\n actions.move_to_element(element)\n actions.double_click(element).perform()", "def doubleClick(self):\n cmdId = self.executeCommand(Command.DOUBLE_CLICK)\n return cmdId", "def double_click(self):\n self.scroll_to()\n ActionChains(self.driver).double_click(self._element).perform()", "def double_click(\n self,\n locator: Locator,\n wait_time: Optional[float] = None,\n timeout: Optional[float] = None,\n ) -> WindowsElement:\n return self._mouse_click(locator, \"DoubleClick\", wait_time, timeout)", "def game_click(coord):\n mouseclick(coord[0], coord[1])\n time.sleep(0.5)", "def mouseDoubleClickEvent(self, event):\n event.ignore()", "def double_click_power(self):\n get_power_event_cmd = (\"getevent -pl 2>&1 | sed -n \"\n \"'/^add/{h}/KEY_POWER/{x;s/[^/]*//p}'\")\n input_event = self.adb.exec_adb_cmd(\n \"shell '{cmd}'\".format(cmd=get_power_event_cmd)).communicate()[0]\n\n self.android_device_driver.adb.exec_adb_cmd(\"shell '{cmd}'\".format(\n cmd=DOUBLE_CLICK_POWER_EVENT_TEMPLATE.format(input_event=input_event)))", "def click(self, x, y, btn):\n if btn == LEFT_BTN:\n lastClickTime, lastClickPos = self._lastClick\n\n # Signal mouse double clicked event first\n if (time.time() - lastClickTime) <= self._DOUBLE_CLICK_TIMEOUT:\n # Use position of first click\n eventDict = prepareMouseSignal('mouseDoubleClicked', 'left',\n *lastClickPos)\n self.plot.notify(**eventDict)\n\n self._lastClick = 0., None\n else:\n # Signal mouse clicked event\n dataPos = self.plot.pixelToData(x, y)\n assert dataPos is not None\n eventDict = prepareMouseSignal('mouseClicked', 'left',\n dataPos[0], dataPos[1],\n x, y)\n self.plot.notify(**eventDict)\n\n self._lastClick = time.time(), (dataPos[0], dataPos[1], x, y)\n\n elif btn == RIGHT_BTN:\n # Signal mouse clicked event\n dataPos = self.plot.pixelToData(x, y)\n assert dataPos is not None\n eventDict = prepareMouseSignal('mouseClicked', 'right',\n dataPos[0], dataPos[1],\n x, y)\n self.plot.notify(**eventDict)", "def ev_MOUSEDOWN(self, event):", "def __itemSingleClickedToDouble(self, item, col):\n self.itemDoubleClicked.emit(item, col)", "def doubleClick(self, event):\n doRotation = False\n\n self.can.focus_force()\n interCircle = self.findInter(event.x, event.y)\n if interCircle:\n xa, ya, xb, yb = self.can.coords(interCircle)\n inter = ((xa + xb)/2, (ya + yb)/2)\n for seg in self.segs:\n if (not doRotation) and (seg.getStartPoint() == inter):\n doRotation = True\n lastInter = inter\n if doRotation:\n seg.place(lastInter)\n seg.rotate(pi/2)\n lastInter = seg.getEndPoint()\n\n self.wipe(self.segs)", "def isDoubleClicked(self, timeout=20.0, commandId=None):\n TestAdapterLib.check_timeout(caller=TestAdapterLib.caller(), timeout=timeout)\n \n return self.isActionAccepted(timeout=timeout, commandName=Command.DOUBLE_CLICK, \n commandId=commandId)", "def set_doubleclick_slot(self, fun):\n 
self.doubleclick_fun = fun", "def selectitem_double_click(a):\n\n view_thumbnail_main(treeview)", "def ev_mousebuttondown(self, event: tcod.event.MouseButtonDown) -> T | None:", "def _perform_click_input(\n button=\"left\",\n coords=(None, None),\n double=False,\n button_down=True,\n button_up=True,\n wheel_dist=0,\n pressed=\"\",\n key_down=True,\n key_up=True,\n fast_move=False\n):\n\n # Handle if the mouse buttons are swapped\n if win32functions.GetSystemMetrics(win32defines.SM_SWAPBUTTON):\n if button.lower() == 'left':\n button = 'right'\n elif button.lower() == 'right':\n button = 'left'\n\n events = []\n if button.lower() == 'left':\n events.append(win32defines.MOUSEEVENTF_MOVE)\n if button_down:\n events.append(win32defines.MOUSEEVENTF_LEFTDOWN)\n if button_up:\n events.append(win32defines.MOUSEEVENTF_LEFTUP)\n elif button.lower() == 'right':\n if button_down:\n events.append(win32defines.MOUSEEVENTF_RIGHTDOWN)\n if button_up:\n events.append(win32defines.MOUSEEVENTF_RIGHTUP)\n elif button.lower() == 'middle':\n if button_down:\n events.append(win32defines.MOUSEEVENTF_MIDDLEDOWN)\n if button_up:\n events.append(win32defines.MOUSEEVENTF_MIDDLEUP)\n elif button.lower() == 'move':\n events.append(win32defines.MOUSEEVENTF_MOVE)\n events.append(win32defines.MOUSEEVENTF_ABSOLUTE)\n elif button.lower() == 'x':\n if button_down:\n events.append(win32defines.MOUSEEVENTF_XDOWN)\n if button_up:\n events.append(win32defines.MOUSEEVENTF_XUP)\n\n if button.lower() == 'wheel':\n events.append(win32defines.MOUSEEVENTF_WHEEL)\n\n # if we were asked to double click (and we are doing a full click\n # not just up or down.\n if double and button_down and button_up:\n events *= 2\n\n if button_down and (button.lower() not in ['move', 'wheel']):\n # wait while previous click is not affecting our current click\n while 0 < win32api.GetTickCount() - win32api.GetLastInputInfo() < win32gui.GetDoubleClickTime():\n time.sleep(Timings.after_clickinput_wait)\n\n # set the cursor position\n _set_cursor_pos((coords[0], coords[1]))\n if not fast_move:\n time.sleep(Timings.after_setcursorpos_wait)\n if win32api.GetCursorPos() != (coords[0], coords[1]):\n _set_cursor_pos((coords[0], coords[1]))\n time.sleep(Timings.after_setcursorpos_wait)\n\n keyboard_keys = pressed.lower().split()\n if ('control' in keyboard_keys) and key_down:\n keyboard.VirtualKeyAction(keyboard.VK_CONTROL, up=False).run()\n if ('shift' in keyboard_keys) and key_down:\n keyboard.VirtualKeyAction(keyboard.VK_SHIFT, up=False).run()\n if ('alt' in keyboard_keys) and key_down:\n keyboard.VirtualKeyAction(keyboard.VK_MENU, up=False).run()\n\n dw_flags = 0\n for event in events:\n dw_flags |= event\n\n dw_data = 0\n if button.lower() == 'wheel':\n wheel_dist = wheel_dist * 120\n dw_data = wheel_dist\n\n if button.lower() == 'move':\n x_res = win32functions.GetSystemMetrics(win32defines.SM_CXSCREEN)\n y_res = win32functions.GetSystemMetrics(win32defines.SM_CYSCREEN)\n x_coord = int(ceil(coords[0] * 65535 / (x_res - 1.))) # in Python 2.7 return float val\n y_coord = int(ceil(coords[1] * 65535 / (y_res - 1.))) # in Python 2.7 return float val\n win32api.mouse_event(dw_flags, x_coord, y_coord, dw_data)\n else:\n for event in events:\n if event == win32defines.MOUSEEVENTF_MOVE:\n x_res = win32functions.GetSystemMetrics(win32defines.SM_CXSCREEN)\n y_res = win32functions.GetSystemMetrics(win32defines.SM_CYSCREEN)\n x_coord = int(ceil(coords[0] * 65535 / (x_res - 1.))) # in Python 2.7 return float val\n y_coord = int(ceil(coords[1] * 65535 / (y_res - 1.))) # in 
Python 2.7 return float val\n win32api.mouse_event(\n win32defines.MOUSEEVENTF_MOVE | win32defines.MOUSEEVENTF_ABSOLUTE,\n x_coord, y_coord, dw_data)\n else:\n win32api.mouse_event(\n event | win32defines.MOUSEEVENTF_ABSOLUTE,\n coords[0], coords[1], dw_data)\n\n if not fast_move:\n time.sleep(Timings.after_clickinput_wait)\n\n if ('control' in keyboard_keys) and key_up:\n keyboard.VirtualKeyAction(keyboard.VK_CONTROL, down=False).run()\n if ('shift' in keyboard_keys) and key_up:\n keyboard.VirtualKeyAction(keyboard.VK_SHIFT, down=False).run()\n if ('alt' in keyboard_keys) and key_up:\n keyboard.VirtualKeyAction(keyboard.VK_MENU, down=False).run()", "def RightClick(self):\n self._PressRightButton()\n self._ReleaseAllButtons()", "def ev_mousebuttondown(self, event: MouseButtonDown) -> None:", "def on_dclick ( self, object ):\n pass", "def mouse_click(left_right, down_up, x, y):\n mouse_events = {\n \"leftdown\": 0x8002,\n \"leftup\": 0x8004,\n \"rightdown\": 0x8008,\n \"rightup\": 0x8010\n }\n ctypes.windll.user32.SetCursorPos(x, y)\n ctypes.windll.user32.mouse_event(mouse_events[left_right.lower() + down_up.lower()], int(x), int(y), 0, 0)", "def right_click(coords=(0, 0)):\n _perform_click_input(button='right', coords=coords)", "def right_click(self, *args):\n return _ida_hexrays.Hexrays_Hooks_right_click(self, *args)", "def click(self, agent):\n self.grab(agent)\n #eventlet.sleep(5)\n self.degrab(agent)", "def click(point):\n m = PyMouse()\n m.move(*point)\n m.press(*point)\n m.release(*point)", "def __editorDoubleClicked(self, editor, pos, buttons):\n self.editorDoubleClickedEd.emit(editor, pos, buttons)", "def twEthogram_doubleClicked(self):\n if self.observationId:\n if self.playerType == VIEWER:\n QMessageBox.critical(self, programName, (\"A observação atual é aberta no modo VIEW.\\n\"\n \"Não é permitido registrar eventos neste modo.\"))\n return\n\n if self.twEthogram.selectedIndexes():\n ethogramRow = self.twEthogram.selectedIndexes()[0].row()\n event = self.full_event(str(ethogramRow))\n self.writeEvent(event, self.getLaps())\n else:\n self.no_observation()" ]
[ "0.7985254", "0.7950458", "0.7732801", "0.759767", "0.7337703", "0.723603", "0.7195858", "0.69655806", "0.6576329", "0.64840645", "0.6409206", "0.63825154", "0.62824076", "0.6248159", "0.6243938", "0.6222214", "0.62110955", "0.61339504", "0.61338806", "0.6060498", "0.6041223", "0.6041112", "0.6038628", "0.60127014", "0.59945893", "0.596043", "0.59462774", "0.5917708", "0.59176713", "0.58735746" ]
0.81779
0
Simulates typing a string of characters
def type_msg(string):
    k = PyKeyboard()
    k.type_string(string)
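A small usage sketch, assuming PyKeyboard comes from the pykeyboard module of PyUserInput:

    from pykeyboard import PyKeyboard

    # types the text into whatever window currently has keyboard focus
    type_msg("hello world")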
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def characters(self, data):\n pass", "def type_out(text):\n disable_typing.start()\n text = text + \"\\n\"\n for c in text:\n sys.stdout.write(c)\n sys.stdout.flush()\n time.sleep(0.01)\n disable_typing.stop()", "def _type_text(text):\n FlaUIKeyboard.Type(str(text))", "def do(s):\r\n return get_AA_subs(generate_mutString(s))", "def _transform_by_type(self, text):\n\t\treturn '*' * len(text) if self._model.inputType == 'password' else text", "def mutateString(val, numMutate, ctype):\n\tmutations = set()\n\tcount = 0\n\twhile count < numMutate:\n\t\tj = randint(0, len(val)-1)\n\t\tif j not in mutations:\n\t\t\tif ctype == \"alpha\":\n\t\t\t\tch = selectRandomFromList(alphaTokens)\n\t\t\telif ctype == \"num\":\n\t\t\t\tch = selectRandomFromList(numTokens)\n\t\t\telif ctype == \"any\":\n\t\t\t\tch = selectRandomFromList(tokens)\n\t\t\tval = val[:j] + ch + val[j+1:]\n\t\t\tmutations.add(j)\n\t\t\tcount += 1\n\treturn val", "def _put(self, char: str, index: int = 0) -> None:\n # pylint: disable=too-many-return-statements\n if not 0 <= index < self._chars:\n return\n index = self._adjusted_index(index)\n if self._chardict and char in self._chardict:\n self._set_buffer(index, self._chardict[char])\n return\n char = char.lower()\n if char == \".\":\n self._set_buffer(index, self._get_buffer(index) | 0b10000000)\n return\n if char in \"abcdefghijklmnopqrstuvwxy\":\n character = ord(char) - 97 + 10\n elif char == \"-\":\n character = 36\n elif char in \"0123456789\":\n character = ord(char) - 48\n elif char == \" \":\n self._set_buffer(index, 0x00)\n return\n elif char == \":\":\n self._set_buffer(4, 0x02)\n return\n elif char == \";\":\n self._set_buffer(4, 0x00)\n return\n elif char in \"lL\":\n self._set_buffer(index, 0b00111000)\n return\n elif char in \"oO\":\n self._set_buffer(index, 0b00111111)\n return\n else:\n return\n self._set_buffer(index, NUMBERS[character])", "async def typewriter(typew):\n if not typew.text[0].isalpha() and typew.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n textx = await typew.get_reply_message()\n message = typew.pattern_match.group(1)\n if message:\n pass\n elif textx:\n message = textx.text\n else:\n await typew.edit(\"`Give a text to type!`\")\n return\n sleep_time = 0.03\n typing_symbol = \"|\"\n old_text = ''\n await typew.edit(typing_symbol)\n await asyncio.sleep(sleep_time)\n for character in message:\n old_text = old_text + \"\" + character\n typing_text = old_text + \"\" + typing_symbol\n await typew.edit(typing_text)\n await asyncio.sleep(sleep_time)\n await typew.edit(old_text)\n await asyncio.sleep(sleep_time)", "def non_secret_char(c):\n return c", "def typeify(s):\n try:\n return literal_eval(s)\n except:\n return s", "def cry(s : str) -> CryptolTerm:\n return CryptolTerm(s)", "def __call__(self, s, a=None, d=None):\n if a is None: a = self.allchars\n if d is None: d = self.delchars\n return s.translate(a,d)", "def _convert_chars_in_struct_attrs_simple(self, s):\n return re.sub(r'''(([\\\"\\']).*?\\2)''',\n lambda mo: self._convert_chars(mo.group(0)), s)", "def test_string_insertion(a_string, a_character):\n for position in range(0, len(a_string)+1):\n print a_string[:position] + a_character + a_string[position:]", "def c_chars(x):\r\n return (c_char * len(x))(*x)", "def _put(self, char: str, index: int = 0) -> None:\n if not 0 <= index < self._chars:\n return\n if not 32 <= ord(char) <= 127:\n return\n if char == \".\":\n self._set_buffer(\n self._adjusted_index(index * 2 + 1),\n self._get_buffer(self._adjusted_index(index * 2 + 1)) | 
0b01000000,\n )\n return\n character = ord(char) * 2 - 64\n self._set_buffer(self._adjusted_index(index * 2), CHARS[1 + character])\n self._set_buffer(self._adjusted_index(index * 2 + 1), CHARS[character])", "def mutStr(st):\n\tl = len(st)\n\tci = randomInt(0, l - 1)\n\tcv = st[ci]\n\tif cv.isdigit():\n\t\tr = selectRandomFromList(dig)\n\telif cv.isupper():\n\t\tr = selectRandomFromList(ucc)\n\telse:\n\t\tr = selectRandomFromList(lcc)\n\t\n\tnst = st[:ci] + r + st[ci+1:] if l > 1 else r\n\treturn nst", "def assignCharacters(otherChars, where):\n if(isinstance(otherChars, list)):\n where.append(otherChars)\n if(isinstance(otherChars, str)):\n tmp = \" \".join(otherChars).split()\n where.append(tmp)", "def escape_character_in_string(self, a, text):\n logging.debug(\"in escape character \" + text)\n #self.just_read_char()\n self.read_char()\n self.produce(STRING, text)", "def repeat(s):\r\n\r\n return s", "def tiny_atomizer() -> atomizers.AsciiCharacterAtomizer:\n yield atomizers.AsciiCharacterAtomizer.FromText(\"Hello, world!\")", "def scinot(string):\n\t\"\"\"If there is no '+' or '-' character in string, returns it as it is.\"\"\"\n\t\"\"\"If the argument is not string, returns the argument\"\"\"\n\tif type(string) != str:\n\t\treturn string\n\telse:\n\t\tretstr = string[0]\n\t\tfor char in string[1:]:\n\t\t\tif ((char == '-')|(char == '+')):\n\t\t\t\tretstr += 'E' + char\n\t\t\telse:\n\t\t\t\tretstr += char\n\t\t\n\t\treturn retstr", "def ord(s):\n pass", "def characters(self, text):\n if text.isspace(): return\n text = str(text)\n if self.curelement == \"residue\":\n self.newresname = text\n elif self.curelement == \"atom\":\n self.newatomname = text\n elif self.curelement == \"useatomname\":\n self.oldatomname = text\n elif self.curelement == \"useresname\":\n self.oldresname = text", "def sext(self, typ):", "def autoconvert(s):\n try:\n return eval(s)\n except:\n return s", "def test_keyboard_characters(self):\n pass", "def as_you_type_onkey(self, tag: str, kwargs: Any) -> None:\n if kwargs['c'] != self.c:\n return\n if kwargs['ch'] not in '\\'\",.:) \\n\\t':\n return\n c = self.c\n spell_ok = True\n if self.spell_as_you_type: # might just be for wrapping\n w = c.frame.body.wrapper\n txt = w.getAllText()\n i = w.getInsertPoint()\n word = txt[:i].rsplit(None, 1)[-1]\n word = ''.join(i if i.isalpha() else ' ' for i in word).split()\n if word:\n word = word[-1]\n ec = c.spellCommands.handler.spellController\n suggests = ec.process_word(word)\n if suggests:\n spell_ok = False\n g.es(' '.join(suggests[:5]) +\n ('...' 
if len(suggests) > 5 else ''),\n color='red')\n elif suggests is not None:\n spell_ok = False\n g.es('[no suggestions]')\n self.suggestions = suggests\n self.suggestion_idx = 0\n self.word = word\n if spell_ok and self.wrap_as_you_type and kwargs['ch'] == ' ':\n w = c.frame.body.wrapper\n txt = w.getAllText()\n i = w.getInsertPoint()\n # calculate the current column\n parts = txt.split('\\n')\n popped = 0 # chars on previous lines\n while len(parts[0]) + popped < i:\n popped += len(parts.pop(0)) + 1 # +1 for the \\n that's gone\n col = i - popped\n if col > self.page_width:\n txt = txt[:i] + '\\n' + txt[i:] # replace space with \\n\n w.setAllText(txt)\n c.p.b = txt\n w.setInsertPoint(i + 1) # must come after c.p.b assignment", "def func0(s):\n\n return s+\"tsy\"", "def one_pass(self, s: str) -> str:\n alpha_map = {\n '1': 'a', '2': 'b', '3': 'c', '4': 'd', '5': 'e', '6': 'f', '7': 'g',\n '8': 'h', '9': 'i', '10': 'j', '11': 'k', '12': 'l', '13': 'm', '14': 'n',\n '15': 'o', '16': 'p', '17': 'q', '18': 'r', '19': 's', '20': 't',\n '21': 'u',\n '22': 'v', '23': 'w', '24': 'x', '25': 'y', '26': 'z'\n }\n\n i, res = 0, ''\n while i < len(s):\n if i + 2 < len(s) and s[i + 2] == '#':\n res += alpha_map[s[i:i + 2]]\n i += 3\n else:\n res += alpha_map[s[i]]\n i += 1\n return res" ]
[ "0.57233626", "0.57059294", "0.5684674", "0.5682626", "0.5663008", "0.564662", "0.5634049", "0.56116927", "0.55977404", "0.5592762", "0.55882055", "0.5541043", "0.547264", "0.5471724", "0.5471444", "0.54415923", "0.53935754", "0.53798544", "0.53640604", "0.53581715", "0.5340967", "0.5324907", "0.5313119", "0.53122187", "0.5311633", "0.53086054", "0.53075784", "0.53040695", "0.5300662", "0.5277478" ]
0.5819586
0
Simulates a mouse wheel movement
def wheel(ticks):
    m = PyMouse()
    m.scroll(ticks)
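A usage sketch under the assumption that PyMouse.scroll takes a vertical tick count, with positive values scrolling up and negative values scrolling down on most backends:

    from pymouse import PyMouse

    wheel(3)   # scroll up three ticks
    wheel(-3)  # scroll down three ticks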
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ev_mousewheel(self, event: MouseWheel) -> None:", "def on_mouse_wheel(self, e): # pragma: no cover\n super(TraceView, self).on_mouse_wheel(e)\n if e.modifiers == ('Alt',):\n start, end = self._interval\n delay = e.delta * (end - start) * .1\n self.shift(-delay)", "def on_mouse_wheel(self, event):\n self.translate -= event.delta[1]\n self.game_program['u_view'] = self.view\n\n self.yaw, self.pitch = 0, 0\n\n self.rot_y(self.yaw * np.pi / 180)\n self.rot_x(self.pitch * np.pi / 180)\n\n self.view = np.dot(self.rot_mat_y, self.rot_mat_x)\n self.game_program['u_view'] = self.view\n\n self.update()", "def wheel_click(coords=(0, 0)):\n _perform_click_input(button='middle', coords=coords)", "def emulate_wheel(self, data, direction, timeval):\n if direction == 'x':\n code = 0x06\n elif direction == 'z':\n # Not enitely sure if this exists\n code = 0x07\n else:\n code = 0x08\n\n if WIN:\n data = data // 120\n\n return self.create_event_object(\n \"Relative\",\n code,\n data,\n timeval)", "def on_mouse_wheel(self,event,canvas):\n canvas.yview(\"scroll\",-1*event.delta/100,\"units\")", "def mouse_motion(self, dx, dy):\n dx /= 8; dy /= 8\n self.rot[0] += dy; self.rot[1] -= dx\n if self.rot[0] > 90: self.rot[0] = 90\n elif self.rot[0] < -90: self.rot[0] = -90", "def ev_mousewheel(self, event: tcod.event.MouseWheel) -> T | None:", "def control_wheel(self, om_w, time_for_move, side):\n for i in range(1, len(time_for_move)):\n if side == 'left':\n print(\"OMEGA LEFT CONTROL\")\n self.set_speed(om_w[i - 1], 10)\n rospy.sleep(time_for_move[i] - time_for_move[i - 1])\n else:\n print(\"OMEGA RIGHT CONTROL\")\n self.set_speed(10, om_w[i - 1])\n rospy.sleep(time_for_move[i] - time_for_move[i - 1] - 0.5)\n self.flag = True\n self.set_speed(10, 10)", "def on_mouse_press(self, event):\n self.on_mouse_wheel(event)", "def mouse_wheel_up(self):\n if not self.scroll_element is None:\n self.scroll_element.mouse_wheel_up()", "def draw_wheel():\r\n\touter_radius = 1\r\n\tthickness = .4\r\n\tif wireframe:\r\n\t\tglutWireTorus(thickness,outer_radius - thickness,8,8)\r\n\telse:\r\n\t\tglutSolidTorus(thickness,outer_radius - thickness,8,8)\r\n\t\tglPushAttrib(GL_CURRENT_BIT)\r\n\t\tglPushAttrib(GL_LIGHTING_BIT)\r\n\t\tglDisable(GL_LIGHTING)\r\n\t\tglColor3f(0,0,0)\r\n\t\tglutWireTorus(thickness+.01,outer_radius - thickness + 0.005,8,8)\t\r\n\t\tglPopAttrib()\r\n\t\tglPopAttrib()", "def emit_mouse(self, report):\n for name, attr in self.layout.mouse.items():\n # If the attr is a tuple like (left_analog_y, \"-\")\n # then set the attr to just be the first item\n attr, modifier = attr\n\n if attr.startswith(\"trackpad_touch\"):\n active_attr = attr[:16] + \"active\"\n if not getattr(report, active_attr):\n self.mouse_pos.pop(name, None)\n continue\n\n pos = getattr(report, attr)\n if name not in self.mouse_pos:\n self.mouse_pos[name] = pos\n\n sensitivity = 0.5\n self.mouse_rel[name] += (pos - self.mouse_pos[name]) * sensitivity\n self.mouse_pos[name] = pos\n\n elif \"analog\" in attr:\n pos = getattr(report, attr)\n if (pos > (128 + self.mouse_analog_deadzone)\n or pos < (128 - self.mouse_analog_deadzone)):\n accel = (pos - 128) / 10\n else:\n continue\n\n # If a minus modifier has been given then minus the acceleration\n # to invert the direction.\n if (modifier and modifier == \"-\"):\n accel = -accel\n\n sensitivity = self.mouse_analog_sensitivity\n self.mouse_rel[name] += accel * sensitivity\n\n # Emulate mouse wheel (needs special handling)\n if name in (ecodes.REL_WHEELUP, ecodes.REL_WHEELDOWN):\n ecode = 
ecodes.REL_WHEEL # The real event we need to emit\n write = False\n if getattr(report, attr):\n self._scroll_details['direction'] = name\n now = time.time()\n last_write = self._scroll_details.get('last_write')\n if not last_write:\n # No delay for the first button press for fast feedback\n write = True\n self._scroll_details['count'] = 0\n if name == ecodes.REL_WHEELUP:\n value = 1\n elif name == ecodes.REL_WHEELDOWN:\n value = -1\n if last_write:\n # Delay at least one cycle before continual scrolling\n if self._scroll_details['count'] > 1:\n if now - last_write > self.scroll_delay:\n write = True\n elif now - last_write > self.scroll_repeat_delay:\n write = True\n if write:\n self.device.write(ecodes.EV_REL, ecode, value)\n self._scroll_details['last_write'] = now\n self._scroll_details['count'] += 1\n continue # No need to proceed further\n else:\n # Reset so you can quickly tap the button to scroll\n if self._scroll_details.get('direction') == name:\n self._scroll_details['last_write'] = 0\n self._scroll_details['count'] = 0\n\n rel = int(self.mouse_rel[name])\n self.mouse_rel[name] = self.mouse_rel[name] - rel\n self.device.write(ecodes.EV_REL, name, rel)\n\n self.device.syn()", "def side_wheel_from_axis():", "def handle_scrollwheel(self, event):\n delta_x, delta_y, delta_z = self._get_deltas(event)\n if delta_x:\n self.events.append(\n self.emulate_wheel(delta_x, 'x', self.timeval))\n if delta_y:\n self.events.append(\n self.emulate_wheel(delta_y, 'y', self.timeval))\n if delta_z:\n self.events.append(\n self.emulate_wheel(delta_z, 'z', self.timeval))", "def mouse_wheel(self, event):\n\n if event.num == 5 or event.delta == -120:\n event.widget.yview_scroll(1, UNITS)\n self.tablerowheader.yview_scroll(1, UNITS)\n if event.num == 4 or event.delta == 120:\n if self.canvasy(0) < 0:\n return\n event.widget.yview_scroll(-1, UNITS)\n self.tablerowheader.yview_scroll(-1, UNITS)\n self.redrawVisible()\n return", "def mouse_wheel_down(self):\n if not self.scroll_element is None:\n self.scroll_element.mouse_wheel_down()", "def _on_mousewheel(event):\n if event.num == 4 or event.delta > 0:\n canvas.yview_scroll(-1, \"units\" )\n elif event.num == 5 or event.delta < 0:\n canvas.yview_scroll(1, \"units\" )", "def on_mouse_wheel(self, event):\n delta = event.delta[1]\n if delta > 0: # Zoom in\n factor = 0.9\n elif delta < 0: # Zoom out\n factor = 1 / 0.9\n for _ in range(int(abs(delta))):\n self.zoom(factor, event.pos)", "def set_wheel_speed(self, om_l, om_r):\n self.om_left = om_l\n self.om_right = om_r", "def wheels(self, left, right):\n\t\twith self.data_lock:\n\t\t\tself.leftSpeed \t= left\t/500.0\t# leftSpeed and rightSpeed are in pixels per second\n\t\t\tself.rightSpeed = right\t/500.0", "def wheel():\n wheel_pos = read_npy_file('wheel.position.npy')\n wheel_timestamps = read_npy_file('wheel.timestamps.npy')\n wheel_rate = get_rate(wheel_timestamps)\n\n wheel_ts = TimeSeries(\n name='wheel_position',\n starting_time=wheel_timestamps[0, 1],\n rate=wheel_rate,\n data=np.ravel(wheel_pos),\n unit='mm',\n conversion=0.135,\n description='The position reading of the rotary encoder attached to '\n 'the rubber wheel that the mouse pushes left and right '\n 'with his forelimbs.',\n comments='The wheel has radius 31 mm and 1440 ticks per revolution, '\n 'so multiply by 2*pi*r/tpr=0.135 to convert to millimeters. '\n 'Positive velocity (increasing numbers) correspond to clockwise '\n 'turns (if looking at the wheel from behind the mouse), i.e. 
'\n 'turns that are in the correct direction for stimuli presented '\n 'to the left. Likewise negative velocity corresponds to right choices.'\n )\n nwb_file.add_acquisition(wheel_ts)", "def handle_scrollwheel(self, event):\n # relative Scrollwheel\n scroll_x, scroll_y = self._get_scroll(event)\n\n if scroll_x:\n self.events.append(\n self.emulate_wheel(scroll_x, 'x', self.timeval))\n\n if scroll_y:\n self.events.append(\n self.emulate_wheel(scroll_y, 'y', self.timeval))", "def set_wheel(self, wheel):\n self.wheel_turn = clamp(wheel, -1, 1)", "def mouse_wheelEvent(self, e):\n if self.image is not None:\n modifiers = QtWidgets.QApplication.keyboardModifiers()\n if modifiers == QtCore.Qt.ControlModifier:\n wheel_counter = e.angleDelta()\n if wheel_counter.y() / 120 == -1:\n if self.width_result_image == 1000:\n pass\n else:\n self.width_result_image -= 100\n\n if wheel_counter.y() / 120 == 1:\n if self.width_result_image == 4000:\n pass\n else:\n self.width_result_image += 100\n self.show_to_window()", "def mouse(*args, enableScrollWheel: bool=True, mouseButtonTracking: int=0,\n mouseButtonTrackingStatus: bool=True, scrollWheelStatus: bool=True, **kwargs)->int:\n pass", "def move_mouse(kf_x, m, img): \n exponent = 1.6\n x, y, x_vel, y_vel = (int(kf_x[0]), int(kf_x[1]), kf_x[2], kf_x[3])\n mx, my = m.position()\n win_height, win_width, channel = img.shape\n x_screen, y_screen = m.screen_size()\n min_x, max_x = 0, x_screen\n min_y, max_y = 0, y_screen \n\n #Calculations\n speed = np.sqrt(x_vel**2 + y_vel**2) \n power = math.pow(speed, exponent) \n ratio = speed / power\n theta = math.atan2(y_vel, x_vel) \n x_comp = power * math.cos(theta) \n y_comp = power * math.sin(theta) \n xf, yf = mx + x_comp, my + y_comp\n\n if xf < min_x: \n xf = min_x\n elif xf > max_x: \n xf = max_x\n elif yf < min_y: \n yf = min_y\n elif yf > max_y: \n yf = max_y\n m.move(xf, yf)\n return speed", "def wheelEvent(self, ev):\n\n # Check if we're in auto Zoom mode\n if self.__zooming:\n # we're zooming\n if (ev.angleDelta().y() > 0):\n self.zoom(ev.pos(), 1)\n else:\n self.zoom(ev.pos(), -1)\n\n else:\n # not zooming - pass wheel event on\n self.mouseWheel.emit(self, ev)", "def on_mouse_motion(self, x, y, delta_x, delta_y):\r\n pass", "def Scroll(self, steps):\n self._EnsureHIDValueInRange(steps)\n self._kit.MouseScroll(steps)\n time.sleep(self.send_delay)" ]
[ "0.70724034", "0.6867929", "0.6845834", "0.6725395", "0.6706772", "0.66839576", "0.66106194", "0.64879376", "0.6433302", "0.6428224", "0.6424929", "0.63497424", "0.6329097", "0.62871855", "0.62500674", "0.62429804", "0.62024975", "0.61738163", "0.6163825", "0.6155793", "0.613951", "0.61080164", "0.61019105", "0.6084358", "0.60623366", "0.5971133", "0.5964755", "0.59545106", "0.5940277", "0.5930189" ]
0.79435927
0
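The row above pairs a scroll-handling query with a document that rate-limits REL_WHEEL emission: the first button press writes immediately, continual scrolling only begins after a longer repeat delay, and subsequent events are throttled by a shorter per-event delay. A minimal sketch of that gating logic, with the evdev device write stubbed out; the names scroll_delay and scroll_repeat_delay mirror the snippet, but the delay values here are placeholder assumptions:

    import time

    def should_write(state, scroll_delay=0.05, scroll_repeat_delay=0.25):
        # Decide whether a scroll event may be emitted now, updating state.
        now = time.time()
        last_write = state.get('last_write')
        if not last_write:
            # No delay for the first button press, for fast feedback
            state['count'] = 0
            write = True
        elif state['count'] > 1:
            # Already repeating: throttle by the short per-event delay
            write = now - last_write > scroll_delay
        else:
            # Delay at least one cycle before continual scrolling begins
            write = now - last_write > scroll_repeat_delay
        if write:
            state['last_write'] = now
            state['count'] += 1
        return write

    state = {}
    print(should_write(state))  # True: the first press fires immediately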
Compresses a byte array with the xz binary
def compress(value):
    process = Popen(["xz", "--compress", "--force"], stdin=PIPE, stdout=PIPE)
    return process.communicate(value)[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def swap_byte(byte_array, index):\n\n if byte_array[index] == 0:\n changed_byte_array = byte_array[0:index] + b\"\\xff\" + byte_array[index + 1 :]\n changed_byte_array = byte_array[0:index] + b\"\\x00\" + byte_array[index + 1 :]\n return changed_byte_array", "def test_compress_2(self):\n text = 'abcdefdeabc'\n actual = LZ77.compress(text)\n expected = bytearray([3]) + bytearray(b'abcdef')\\\n + bytearray([0, 32]) + bytearray([0, 113])\n self.assertEqual(actual, expected)", "def test_compressed(self):\n try:\n import zlib\n except ImportError:\n self.skipTest('zlib is missing')\n\n ba = amf3.ByteArray()\n\n self.assertFalse(ba.compressed)\n\n z = zlib.compress(b'b' * 100)\n ba = amf3.ByteArray(z)\n\n self.assertTrue(ba.compressed)\n\n z = zlib.compress(b'\\x00' * 100)\n ba = amf3.ByteArray(z)\n\n self.assertTrue(ba.compressed)", "def test_compress_1_char(self):\n text = 'a'\n actual = LZ77.compress(text)\n expected = bytearray([0]) + bytearray(b'a')\n self.assertEqual(actual, expected)", "def compression(binary_sequence:str):\r\n compressed_sequence = \"\"\r\n calcul_byte =(len(binary_sequence) % 8)\r\n if calcul_byte != 0:\r\n binary_sequence = (8 - calcul_byte)*'0' + binary_sequence\r\n \"\"\" \r\n Add the missing 0's at the beginning of the string so that its length \r\n is divisible by 8 without remainder\r\n \"\"\"\r\n for byte in range(0, len(binary_sequence), 8):\r\n compressed_sequence += chr(int(binary_sequence[byte:byte+8], 2))\r\n return (compressed_sequence, calcul_byte)", "def savez_compressed(file, *args, **kwds):\n\n ary_list = []\n for a in args:\n ary_list.append(array_create.array(a, bohrium=False))\n return numpy.savez_compressed(file, *ary_list, **kwds)", "def test_compress_2_idenctical_char(self):\n text = 'aa'\n actual = LZ77.compress(text)\n expected = bytearray([0]) + bytearray(b'aa')\n self.assertEqual(actual, expected)", "def Compress(input_filename, output_filename):\n _Write(zlib.compress(_Read(input_filename)), output_filename)", "def zigzag_encode(x):\n x = np.asarray(x, dtype=np.int32)\n return (np.abs(x) << 1) - (x > 0).astype(np.int32)", "def compress(uncompressed):\r\n \r\n # Build the dictionary.\r\n dict_size = 256\r\n dictionary = dict((chr(i), i) for i in range(dict_size))\r\n # in Python 3: dictionary = {chr(i): i for i in range(dict_size)}\r\n \r\n w = \"\"\r\n result = []\r\n for c in uncompressed:\r\n wc = w + c\r\n if wc in dictionary:\r\n w = wc\r\n else:\r\n result.append(dictionary[w])\r\n # Add wc to the dictionary.\r\n dictionary[wc] = dict_size\r\n dict_size += 1\r\n w = c\r\n \r\n # Output the code for w.\r\n if w:\r\n result.append(dictionary[w])\r\n return result", "def compress_file(path):\n\n process = Popen([\"xz\", \"--compress\", \"--force\", \"--stdout\", path], stdout=PIPE)\n return process.communicate()[0]", "def compress_file(path):\n\n process = Popen([\"xz\", \"--compress\", \"--force\", \"--stdout\", path], stdout=PIPE)\n return process.communicate()[0]", "def vcf_compress(fn):\n ret = cmd_exe(f\"vcf-sort {fn} | bgzip > {fn}.gz && tabix {fn}.gz\")", "def to_bytearray(x):\n if isinstance(x, bytearray):\n return x\n else:\n return bytearray(x)", "def __handle_compression(self, x):\n if self.__compress:\n return zlib.compress(x)\n return x", "def gzdeflate():\n return zlib.compress(val)", "def byteswap(data, word_size=4):\n return reduce(lambda x,y: x+''.join(reversed(y)), chunks(data, word_size), '')", "def _saveBinaryData_compressed(self, file, with_axis=None):\n if with_axis is not None:\n data = 
self._data_with_axis(with_axis)\n numpy.save_compressed(file, data=data)\n else:\n numpy.savez_compressed(file, data=self.data)", "def test_decompress_1(self):\n b_array = bytearray([8]) + bytearray(b'abcd') + bytearray([0, 49])\n actual = LZ77.decompress(b_array)\n expected = 'abcdabc'\n self.assertEqual(actual, expected)", "def as_bytes_compressed(self) -> bytes:\n bits_len = self.end()\n whole_bytes_len = div_ceil(bits_len, 8)\n\n key = self.raw_key()[0:whole_bytes_len]\n\n result = bytearray()\n result += leb128_encode_unsigned(bits_len)\n result += key\n\n # Trim insignificant bits in the last byte:\n bits_in_last_byte = bits_len % 8\n if whole_bytes_len > 0 and bits_in_last_byte != 0:\n tail = self.end() % 8\n result[-1] = reset_bits(result[-1], tail)\n\n return bytes(result)", "def _compress_string(content):\n zbuf = StringIO()\n zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)\n zfile.write(content)\n zfile.close()\n return zbuf.getvalue()", "def encode(input_: list):\n global n_bytes\n block = bytearray()\n\n for tup in input_:\n arr = np.array(tup[0], dtype=tup[1]).tobytes()\n n_bytes += len(arr)\n block += arr\n\n return block", "def data_zip(self, data):\n stringio = StringIO.StringIO()\n gzip_file = gzip.GzipFile(fileobj=stringio, mode='wb')\n gzip_file.write(data)\n gzip_file.close()\n return stringio.getvalue()", "def compression(s):", "def test_compress_4_idenctical_char(self):\n text = 'bbbb'\n actual = LZ77.compress(text)\n expected = bytearray([32]) + bytearray(b'bb') + bytearray([0, 16])\n self.assertEqual(actual, expected)", "def to_zarr(self, *args, **kwargs):\n if (\n len(args) == 1\n and isinstance(args[0], str)\n and args[0].endswith(\".zarr.zip\")\n ):\n if {\"compression\", \"mode\"}.issuperset(kwargs.keys()):\n import zarr\n\n with zarr.ZipStore(args[0], **kwargs) as store:\n self.to_zarr(store)\n return\n return super().to_zarr(*args, **kwargs)", "def bytify(binary):\n\tbytes = [0,0,0,0]\n\ti = 3\n\twhile binary:\n\n\t\tbytes[i] = binary&255\n\t\tbinary >>= 8\n\t\ti -= 1 \n\treturn bytes", "def compress(value):\n pickled = pickle_util.dump(value)\n return zlib.compress(pickled)", "def test_compress_seq_diff_8_char(self):\n text = '12345678'\n actual = LZ77.compress(text)\n expected = bytearray([0]) + bytearray(b'12345678')\n self.assertEqual(actual, expected)", "def __init__( self, bytes_reverse=False, bits_reverse=False, insert_at_msb=False ):\n self.output = bytearray()\n self.bits_reverse = bits_reverse\n self.bytes_reverse = bytes_reverse\n self.insert_at_msb = insert_at_msb\n self.bits_remaining = 8\n self.current_bits = 0" ]
[ "0.5709028", "0.56780964", "0.5647832", "0.5616013", "0.56071866", "0.55910605", "0.55739576", "0.55164623", "0.5504677", "0.5499852", "0.5489234", "0.5489234", "0.5486275", "0.5466205", "0.54346704", "0.54300076", "0.540067", "0.53756106", "0.53609854", "0.53576124", "0.535414", "0.534572", "0.53125453", "0.531048", "0.53036666", "0.52904356", "0.5274616", "0.5264539", "0.5254426", "0.5245364" ]
0.5999412
1
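The compress document above pipes the bytes through the xz binary and returns its stdout. A quick round-trip check, assuming an xz executable on PATH; compress is repeated verbatim from the row so the sketch is self-contained, and decompress is a hypothetical counterpart written only for this example:

    from subprocess import Popen, PIPE

    def compress(value):
        process = Popen(["xz", "--compress", "--force"], stdin=PIPE, stdout=PIPE)
        return process.communicate(value)[0]

    def decompress(value):
        # Hypothetical inverse: xz reads stdin and writes stdout when no file is given
        process = Popen(["xz", "--decompress"], stdin=PIPE, stdout=PIPE)
        return process.communicate(value)[0]

    payload = b"hello world" * 100
    assert decompress(compress(payload)) == payload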
Compress the file at 'path' with the xz binary
def compress_file(path):
    process = Popen(["xz", "--compress", "--force", "--stdout", path], stdout=PIPE)
    return process.communicate()[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def zip_file(file_path: str) -> str:\n zip_file_path: str = file_path + \".gz\"\n\n print(f\"Compressing {file_path} into {zip_file_path}\")\n timestamp=path.getmtime(file_path)\n with open(file_path, \"rb\") as read_stream:\n with gzip.open(zip_file_path, \"wb\") as write_stream:\n shutil.copyfileobj(read_stream, write_stream)\n os.utime(zip_file_path, (timestamp,timestamp) )\n\n return zip_file_path", "def compress(value):\n\n process = Popen([\"xz\", \"--compress\", \"--force\"], stdin=PIPE, stdout=PIPE)\n return process.communicate(value)[0]", "def compress(value):\n\n process = Popen([\"xz\", \"--compress\", \"--force\"], stdin=PIPE, stdout=PIPE)\n return process.communicate(value)[0]", "def compress(filename, remove=False):\n import gzip\n fin = open(filename, 'rb')\n fout = gzip.open(filename+'.gz', 'wb')\n fout.writelines(fin)\n fout.close()\n fin.close()\n if remove == True:\n os.remove(filename)\n return", "def _compress_content(self, content):\n zbuf = io.BytesIO()\n zfile = gzip.GzipFile(mode=\"wb\", compresslevel=9, fileobj=zbuf)\n\n try:\n zfile.write(content.read())\n finally:\n zfile.close()\n\n content.file = zbuf\n content.seek(0)\n\n return content", "def handle_file(self, path):\n\n if path:\n if not matches_patterns(path, self.gzip_patterns):\n return\n\n try:\n original_file = self.open(path, mode=\"rb\")\n except FileNotFoundError:\n pass\n else:\n gzipped_path = \"{0}.gz\".format(path)\n\n if self.exists(gzipped_path):\n self.delete(gzipped_path)\n\n gzipped_file = self._compress(original_file)\n gzipped_path = self.save(gzipped_path, gzipped_file)\n\n return gzipped_path, gzipped_path, True", "def _compress_file(filename: str, basename: str):\n write_mode = _get_write_mode(filename)\n\n with tempfile.TemporaryDirectory() as tmpdir:\n shutil.move(filename, os.path.join(tmpdir, basename))\n with tarfile.open(filename, write_mode) as tarball:\n tarball.add(tmpdir, arcname='')", "def compress_file(compression, pretty, src, dst):\n str_tail = \"sed 1d\"\n str_cleanup = \";exit\"\n if pretty:\n str_tail = \"tail -n+2\"\n str_cleanup = \";rm ~;exit\"\n if \"lzma\" == compression:\n command = [\"xz\", \"--format=lzma\", \"--lzma1=preset=9e,lc=1,lp=0,pb=0\", \"--stdout\"]\n header = \"HOME=/tmp/i;%s $0|lzcat>~;chmod +x ~;~%s\" % (str_tail, str_cleanup)\n elif \"raw\" == compression:\n command = [\"xz\", \"-9\", \"--extreme\", \"--format=raw\", \"--stdout\"]\n header = \"HOME=/tmp/i;%s $0|xzcat -F raw>~;chmod +x ~;~%s\" % (str_tail, str_cleanup)\n elif \"xz\" == compression:\n command = [\"xz\", \"--format=xz\", \"--lzma2=preset=9e,lc=1,pb=0\", \"--stdout\"]\n header = \"HOME=/tmp/i;%s $0|xzcat>~;chmod +x ~;~%s\" % (str_tail, str_cleanup)\n else:\n raise RuntimeError(\"unknown compression format '%s'\" % compression)\n (compressed, se) = run_command(command + [src], False)\n wfd = open(dst, \"wb\")\n wfd.write((header + \"\\n\").encode())\n wfd.write(compressed)\n wfd.close()\n make_executable(dst)\n print(\"Wrote '%s': %i bytes\" % (dst, os.path.getsize(dst)))", "def Compress(input_filename, output_filename):\n _Write(zlib.compress(_Read(input_filename)), output_filename)", "def _gzipencode(content):\n import gzip\n out = BytesIO()\n f = gzip.GzipFile(fileobj=out, mode='w', compresslevel=5)\n f.write(content)\n f.close()\n return out.getvalue()", "def prepare_gz(self, filename, *args, **kwargs):\n\n return '/vsigzip/' + filename, args, kwargs", "def _compress_string(content):\n zbuf = StringIO()\n zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)\n 
zfile.write(content)\n zfile.close()\n return zbuf.getvalue()", "def vcf_compress(fn):\n ret = cmd_exe(f\"vcf-sort {fn} | bgzip > {fn}.gz && tabix {fn}.gz\")", "def archive(filepath,archive_dir='archive'):\n\n # Make sure we have a directory to archive to\n try:\n mkdir(archive_dir)\n except:\n print(\"Error making archive directory\")\n return\n\n try:\n (dir, filename) = os.path.split(filepath)\n outfile = os.path.join(dir,archive_dir,filename)+'.gz'\n with open(filename, 'rb') as f_in, gzip.open(outfile, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n except Exception as e:\n print(\"Error archiving \",filepath)\n print(e)\n else:\n try:\n os.remove(filepath)\n except:\n print(\"Error removing \",filepath)", "def zipfile(filepath, cleanup=False):\n\tzfile = filepath+\".gz\"\n\twith open(filepath, 'rb') as f_in:\n\t\twith gzip.open(zfile, 'wb') as f_out:\n\t\t\tf_out.writelines(f_in)\t\n\t\n\tif cleanup and file_exists(zfile):\n\t\tos.remove(filepath)\n\treturn zfile", "def compress_files(self):\n archive_file_path = tkinter.filedialog.asksaveasfilename(parent=self,\n defaultextension=\".zip\",\n filetypes=[(\"Zip File\", \"*.zip\")])\n treeview_items = self.files_treeview.get_children()\n if archive_file_path and treeview_items:\n with ZipFile(archive_file_path, \"w\", ZIP_DEFLATED) as archive:\n for row in treeview_items:\n file_path = self.files_treeview.item(row, \"values\")[0]\n file_name = os.path.basename(file_path)\n archive.write(file_path, arcname=file_name)", "def compress_file(map_, name, save_path):\n size = os.path.getsize(save_path)\n temp = subprocess.run([\"gzip\", \"-k\", save_path])\n cr_size = os.path.getsize(save_path+\".gz\")\n try:\n map_[name] = cr_size / size\n except Exception as e:\n print(f\"File: {save_path}, Ori:{size}, Compr:{cr_size}\")\n print(e)\n raise ZeroDivisionError\n temp = subprocess.run([\"rm\", save_path])\n temp = subprocess.run([\"rm\", save_path+\".gz\"])", "def compress_file(netcdf_file_name):\n\n radar_io.compress_file(netcdf_file_name)", "def archive_file(filename, maxsize):#{{{\n if not os.path.exists(filename):\n print(filename, \"does not exist. 
ignore.\", file=sys.stderr)\n return 1\n\n filesize = os.path.getsize(filename)\n if filesize > maxsize:\n cnt = 0\n zipfile = \"\"\n while 1:\n cnt += 1\n zipfile = \"%s.%d.gz\"%(filename, cnt)\n if not os.path.exists(zipfile):\n break\n # write zip file\n try:\n f_in = open(filename, 'rb')\n except IOError:\n print(\"Failed to read %s\"%(filename), file=sys.stderr)\n return 1\n try:\n f_out = gzip.open(zipfile, 'wb')\n except IOError:\n print(\"Failed to write to %s\"%(zipfile), file=sys.stderr)\n return 1\n\n f_out.writelines(f_in)\n f_out.close()\n f_in.close()\n print(\"%s is archived to %s\"%(filename, zipfile))\n os.remove(filename)\n return 0", "def _gzip_file(filename):\n gzip_filename = filename + '.gz'\n with open(filename, 'rb') as f_in, gzip.open(gzip_filename, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)", "def save_to_gzip(data,fname):\n with gzip.open(fname + '.gz', 'wb',compresslevel = 9) as f:\n f.write(data.tobytes())", "def compress(path, path_out, terms, iterations, annotate, silent):\n if terms is None:\n terms = DEFAULT_TERMS\n\n if not silent:\n print(f\"Compressing image...\")\n\n result = compress_image_to_file(path=path, terms=terms,\n iterations=iterations,\n path_out=path_out,\n annotate=annotate)\n\n output_path = result['output_path']\n\n if not silent:\n print(f\"Compressed to:\\n{output_path}\")\n print(f\"Terms in singular value expansion: {terms}\")\n print(f\"Power method iterations: {result['iterations']}\")\n print(f\"Compression ratio: {result['compression_ratio']}\")\n\n return result", "def __handle_compression(self, x):\n if self.__compress:\n return zlib.compress(x)\n return x", "def compress_directory(directory, filename):\r\n mode = 'w:gz'\r\n name = path(directory).name\r\n with tarfile.open(filename, mode) as tar_file:\r\n tar_file.add(directory, arcname=name)", "def compressFile(source, target):\n data = cake.filesys.readFile(source)\n try:\n data = zlib.compress(data, 1)\n except zlib.error, e:\n raise EnvironmentError(str(e))\n cake.filesys.writeFile(target, data)", "def compress_to_tgz(in_path, tgz_fp):\n t = tarfile.open(name = tgz_fp, mode = 'w:gz')\n t.add(in_path, path.basename(in_path))\n t.close()", "def gzip_worker(args):\n\tp = multiprocessing.current_process()\n\tprint('Start zipping %s: %s %s' %(args[1], p.name, p.pid))\n\tpath = args[0]\n\tfilename = args[1]\n\tassert os.path.splitext(filename)[1] == '.fastq', '%s is not a fastq file' %filename \n\t\n\tcall = 'gzip -c ' + os.path.join(path, filename) + ' > ' + os.path.join(path, filename) + '.gz'\n\tsubprocess.call(call, shell=True)\n\tprint('Completed zipping %s: %s %s' %(filename, p.name, p.pid))", "def archive(self):\n logging.info(_('Creating compressed archive...'))\n\n report_file_ext = 'bz2'\n compressor = 'bzip2'\n caller = Caller({})\n try:\n caller.call('xz --version')\n report_file_ext = 'xz'\n compressor = 'xz'\n except Exception:\n logging.debug('xz compression not available')\n\n if not os.path.exists(self.conf[\"output\"]):\n os.makedirs(self.conf[\"output\"])\n\n self.conf[\"path\"] = os.path.join(\n self.conf[\"output\"],\n \"sosreport-%s-%s.tar.%s\" % (\n 'LogCollector',\n time.strftime(\"%Y%m%d%H%M%S\"),\n report_file_ext\n )\n )\n\n if self.conf[\"ticket_number\"]:\n self.conf[\"path\"] = os.path.join(\n self.conf[\"output\"],\n \"sosreport-%s-%s-%s.tar.%s\" % (\n 'LogCollector',\n self.conf[\"ticket_number\"],\n time.strftime(\"%Y%m%d%H%M%S\"),\n report_file_ext\n )\n )\n\n config = {\n 'report': os.path.splitext(self.conf['path'])[0],\n 
'compressed_report': self.conf['path'],\n 'compressor': compressor,\n 'directory': self.conf[\"local_tmp_dir\"],\n 'rname': os.path.basename(self.conf['path']).split('.')[0],\n }\n caller.configuration = config\n shutil.move(\n os.path.join(\n self.conf[\"local_tmp_dir\"],\n 'working'\n ),\n os.path.join(\n self.conf[\"local_tmp_dir\"],\n config[\"rname\"]\n ),\n )\n caller.call(\"tar -cf '%(report)s' -C '%(directory)s' '%(rname)s'\")\n shutil.rmtree(self.conf[\"local_tmp_dir\"])\n caller.call(\"%(compressor)s -1 '%(report)s'\")\n os.chmod(self.conf[\"path\"], stat.S_IRUSR | stat.S_IWUSR)\n sha256_out = caller.call(\"sha256sum '%(compressed_report)s'\")\n checksum = sha256_out.split()[0]\n with open(\"%s.sha256\" % self.conf[\"path\"], 'w') as checksum_file:\n checksum_file.write(sha256_out)\n\n msg = ''\n if os.path.exists(self.conf[\"path\"]):\n archiveSize = float(os.path.getsize(self.conf[\"path\"])) / (1 << 20)\n\n size = '%.1fM' % archiveSize\n\n msg = _(\n 'Log files have been collected and placed in {path}\\n'\n 'The sha256 for this file is {checksum} and its size is {size}'\n ).format(\n path=self.conf[\"path\"],\n size=size,\n checksum=checksum,\n )\n\n if archiveSize >= 1000:\n msg += _(\n '\\nYou can use the following filters -c, -d, -H in the '\n 'next execution to limit the number of Datacenters,\\n'\n 'Clusters or Hosts that are collected in order to '\n 'reduce the archive size.'\n )\n return msg", "def save_compressed(data, filename, compression_type='bz2', create_link=False):\n # write to compressed HDF5 file\n hdf5 = open_compressed(filename, 'w')\n save(data, hdf5)\n close_compressed(filename, hdf5, compression_type, create_link)", "def create_compressed_file(self):\n\t\tself._compressed_file_name = 'c_' + self.file_name\n\t\tself._compressed_save_path = self.full_path.replace(self.file_name, self._compressed_file_name)\n\t\tself._is_png = 'png' in self.file_extension\n\t\tself._is_jpg = 'jpg' in self.file_extension\n\n\t\timage = Image.open(self.full_path)\n\n\t\tif self._is_png:\n\t\t\timage.save(self._compressed_save_path, quality=85, optimize=False, compress_level=9)\n\t\telif self._is_jpg:\n\t\t\timage.save(self._compressed_save_path, quality=85, progressive=False)\n\t\telse:\n\t\t\tprint('Non-recognized asset format!!')\n\t\t\texit()\n\n\t\tself._compressed_file_size = ufo.get_file_size_in_bytes(self._compressed_save_path)\n\n\n\t\ttransfer_path = self._compressed_save_path.replace('c_' + self.file_name, self.file_name).replace('/configuration_files/', '/quasar_site_django/')\n\t\tufo.copy_file_to_path(self._compressed_save_path, transfer_path)" ]
[ "0.7248893", "0.6594647", "0.6594647", "0.64348674", "0.64283776", "0.6283818", "0.62432826", "0.6221492", "0.6214257", "0.6159141", "0.6146967", "0.6120605", "0.6105703", "0.60970914", "0.6093049", "0.6085339", "0.6084827", "0.6059032", "0.6022121", "0.60189515", "0.59890485", "0.59226716", "0.5919658", "0.5906914", "0.5896386", "0.58778226", "0.58763033", "0.5867575", "0.58499646", "0.58166057" ]
0.7983229
1
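The file variant above passes --stdout, so xz leaves the input file in place and the compressed bytes come back through the pipe; persisting them is the caller's job. A hypothetical end-to-end use, with compress_file repeated from the row for self-containedness and a throwaway temp file as the input:

    import os
    import tempfile
    from subprocess import Popen, PIPE

    def compress_file(path):
        process = Popen(["xz", "--compress", "--force", "--stdout", path], stdout=PIPE)
        return process.communicate()[0]

    with tempfile.NamedTemporaryFile(delete=False, suffix=".txt") as handle:
        handle.write(b"some text\n" * 1000)
        path = handle.name

    with open(path + ".xz", "wb") as out:
        out.write(compress_file(path))  # the original file is untouched thanks to --stdout
    os.remove(path)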
Shows a specific plane within 3D data.
def show_plane(axis, plane, cmap="gray", title=None):
    axis.imshow(plane, cmap=cmap)
    axis.set_xticks([])
    axis.set_yticks([])

    if title:
        axis.set_title(title)

    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_plane(unit_normal, x_array, y_array, fore):\n # print'unit normal = ', unit_normal\n z = (((unit_normal[0] * (fore[0] - x_array)) + (unit_normal[1] * (fore[1] - y_array))) / unit_normal[2]) + fore[2]\n # print 'plane numbers\\n', z\n return z", "def plane(self):\r\n from lsst.analysis import utils\r\n return utils.fitplane(self.points, self.z)", "def planes_3d(self, quantity, xplane, yplane):\n xplane = int(xplane)\n yplane = int(yplane)\n # Get the scalar values\n # Get the data on the plane with a fixed x value. These means we'll\n # have changing (y, z) points\n xdata = self.get_plane(quantity, 'yz', xplane)\n # z first cuz we want y to be changing before z to correspond with the\n # way numpy flattens arrays. Note this means y points will be in the\n # 2nd column\n xplanepoints = np.array(list(itertools.product(self.Z, self.Y)))\n xdata = xdata.flatten()\n xplanexval = np.array(list(itertools.repeat(x[xplane], len(xdata))))\n xplanedata = np.zeros((xplanepoints.shape[0], 4))\n xplanedata[:, 0] = xplanexval\n xplanedata[:, 1] = xplanepoints[:, 1]\n xplanedata[:, 2] = xplanepoints[:, 0]\n xplanedata[:, 3] = xdata\n # Same procedure for fixed y plane\n ydata = self.get_plane(quantity, 'xz', yplane)\n yplanepoints = np.array(list(itertools.product(z, x)))\n ydata = ydata.flatten()\n yplaneyval = np.array(list(itertools.repeat(y[yplane], len(ydata))))\n yplanedata = np.zeros((yplanepoints.shape[0], 4))\n yplanedata[:, 0] = yplanepoints[:, 1]\n yplanedata[:, 1] = yplaneyval\n yplanedata[:, 2] = yplanepoints[:, 0]\n yplanedata[:, 3] = ydata\n labels = ('X [um]', 'Y [um]', 'Z [um]', quantity)\n # Now stack them vertically and plot!\n all_data = np.vstack((xplanedata, yplanedata))\n self.scatter3d(all_data[:, 0], all_data[:, 1], all_data[:, 2],\n all_data[:, 3], labels, 'planes_3d')", "def plane_2d(self, quantity, plane, pval, draw=False, fixed=None):\n self.log.info('Plotting plane')\n pval = int(pval)\n # x = np.arange(0, self.period, self.dx)\n # y = np.arange(0, self.period, self.dy)\n # z = np.arange(0, self.height + self.dz, self.dz)\n x = self.X\n y = self.Y\n z = self.Z\n # Get the scalar values\n freq = self.conf['Simulation']['params']['frequency']\n wvlgth = (consts.c / freq) * 1E9\n title = 'Frequency = {:.4E} Hz, Wavelength = {:.2f} nm'.format(\n freq, wvlgth)\n # Get the plane we wish to plot\n cs = self.get_plane(quantity, plane, pval)\n self.log.info('DATA SHAPE: %s' % str(cs.shape))\n show = self.conf['General']['show_plots']\n p = False\n sim_dir = os.path.expandvars(self.conf['General']['sim_dir'])\n if plane == 'yz' or plane == 'zy':\n labels = ('y [um]', 'z [um]', quantity, title)\n if self.conf['General']['save_plots']:\n p = os.path.join(sim_dir,\n '%s_plane_2d_yz_pval%s.png' % (quantity,\n str(pval)))\n self.heatmap2d(y, z, cs, labels, plane, pval,\n save_path=p, show=show, draw=draw, fixed=fixed)\n elif plane == 'xz' or plane == 'zx':\n labels = ('x [um]', 'z [um]', quantity, title)\n if self.conf['General']['save_plots']:\n p = os.path.join(sim_dir,\n '%s_plane_2d_xz_pval%s.png' % (quantity,\n str(pval)))\n self.heatmap2d(x, z, cs, labels, plane, pval,\n save_path=p, show=show, draw=draw, fixed=fixed)\n elif plane == 'xy' or plane == 'yx':\n labels = ('y [um]', 'x [um]', quantity, title)\n if self.conf['General']['save_plots']:\n p = os.path.join(sim_dir,\n '%s_plane_2d_xy_pval%s.png' % (quantity,\n str(pval)))\n self.heatmap2d(x, y, cs, labels, plane, pval,\n save_path=p, show=show, draw=draw, fixed=fixed)", "def plane(self):\n return plane(self.N, 
self.o)", "def slice_explorer(data, cmap='gray'):\n data_len = len(data)\n\n @interact(plane=(0, data_len-1), continuous_update=False)\n def display_slice(plane=data_len/2):\n fig, axis = plt.subplots(figsize=(20, 7))\n axis_3d = fig.add_subplot(133, projection='3d')\n show_plane(axis, data[plane], title='Plane {}'.format(plane), cmap=cmap)\n slice_in_3d(axis=axis_3d, shape=data.shape, plane=plane)\n plt.show()\n\n return display_slice", "def plot_surface(self):\n X, Y = np.meshgrid(self.x, self.y)\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.plot_surface(X=X, Y=Y, Z=self.z)\n plt.show()", "def GetPlane(plane):\r\n pass", "def plane(self):\n return Plane(Point(0, self.evaluations.exposedWing.edges[2].point1.y, 0), Vector(0, 1, 0),\n hidden=True)", "def show(self, surface):\n if self.area_show:\n self.showArea(surface)\n if self.side_show:\n self.showSegments(surface)\n if self.point_show:\n self.showPoints(surface)", "def show(data_set, number_points: int):\n print(f'info: Showing {number_points} as maximum.')\n sub_set_points = np.random.choice(range(data_set.shape[0]), size=min(data_set.shape[0], number_points))\n x = data_set[sub_set_points, 0]\n y = data_set[sub_set_points, 1]\n z = data_set[sub_set_points, 2]\n\n fig = plt.figure(figsize=(8, 8))\n ax = mplot3d.Axes3D(fig)\n ax.set_title('NMSLIB index 3D representation', fontsize=20)\n ax.scatter(xs=x, ys=y, zs=z)\n plt.show()", "def plane(*args, length: float=0.0, name: AnyStr=\"\", position: List[float, float, float]=None,\n rotation: List[float, float, float]=None, size: float=0.0, width: float=0.0,\n **kwargs)->AnyStr:\n pass", "def DisplayMesh():\r\n \r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, Vertices, Triangles = CreateMatrixVTK(VTKString)\r\n \r\n fig = plt.figure()\r\n ax1 = fig.add_subplot(111,projection = '3d')\r\n ax1.plot_trisurf(Vertices[:,0],Vertices[:,1],Vertices[:,2],triangles= Triangles[:,1:])\r\n ax1.set_zlabel('z')\r\n ax1.set_ylabel('y')\r\n ax1.set_xlabel('x')\r\n plt.show()", "def PlotAirplane():\n airplane = vtkInterface.PolyData(planefile)\n airplane.Plot()", "def plot_3d_object(object_):\n \n # Initialize renderer instance\n r = Renderer()\n\n # Add surfaces and goal regions to the renderer instance\n for surf in object_:\n r.add((object_[surf][0],'b',1))\n if len(object_[surf])>2:\n r.add((object_[surf][2],'r',1))\n r.add((gPoint(-15,-15,-15),'k',1))\n r.show()", "def p(self):\n return 'Plane'", "def mesh_slicer(self, plane, opt):\n\n # get plane coefficients\n a = plane[0]\n b = plane[1]\n c = plane[2]\n\n # create vtk plane object\n VTKplane = vtk.vtkPlane()\n # for now we choose the center point as the point of rotation\n VTKplane.SetOrigin(self.mesh_poly.GetCenter())\n VTKplane.SetNormal(a, b, c)\n VTKplane.SetOrigin(self.epi_apex_node)\n\n # create cutter\n cutEdges = vtk.vtkCutter()\n cutEdges.SetInputData(self.mesh_poly)\n cutEdges.SetCutFunction(VTKplane)\n cutEdges.GenerateCutScalarsOn()\n cutEdges.SetValue(0, 0.5)\n\n # create renderer\n ren = vtk.vtkRenderer()\n ren.SetBackground(0.0, 0.0, 0.0)\n\n # create mapper\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputConnection(cutEdges.GetOutputPort())\n\n # create actor\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n actor.GetProperty().SetColor(0.0, 0.0, 1.0)\n actor.GetProperty().SetLineWidth(2)\n\n # display apex point\n apexA = 
include_points(list(self.epi_apex_node), 1, 15, (0, 0, 1))\n\n if (opt == 'mesh'):\n meshMapper = vtk.vtkPolyDataMapper()\n meshMapper.SetInputData(self.mesh_poly)\n meshActor = vtk.vtkActor()\n meshActor.SetMapper(meshMapper)\n meshActor.GetProperty().SetColor(1.0, 0.0, 0.0)\n\n # generate renderer\n ren.AddActor(self.meshActor)\n ren.AddActor(actor)\n ren.AddActor(apexA)\n\n else:\n ren.AddActor(actor)\n ren.AddActor(apexA)\n\n # display\n vtk_show(ren)", "def get_3d_plot(three_d_matrix, ax, title, length):\r\n x, y, z = np.where(three_d_matrix != 0)\r\n ax.scatter(x, y, z, c='blue')\r\n ax.set_xlabel('x')\r\n ax.set_ylabel('y')\r\n ax.set_xlim(0, length)\r\n ax.set_ylim(0, length)\r\n ax.set_title(title)", "def slice_in_3d(axis, shape, plane):\n Z = np.array([[0, 0, 0],\n [1, 0, 0],\n [1, 1, 0],\n [0, 1, 0],\n [0, 0, 1],\n [1, 0, 1],\n [1, 1, 1],\n [0, 1, 1]])\n\n Z = Z * shape\n\n r = [-1, 1]\n\n X, Y = np.meshgrid(r, r)\n\n # plotting vertices\n axis.scatter3D(Z[:, 0], Z[:, 1], Z[:, 2])\n\n # list of sides' polygons of figure\n verts = [[Z[0], Z[1], Z[2], Z[3]],\n [Z[4], Z[5], Z[6], Z[7]],\n [Z[0], Z[1], Z[5], Z[4]],\n [Z[2], Z[3], Z[7], Z[6]],\n [Z[1], Z[2], Z[6], Z[5]],\n [Z[4], Z[7], Z[3], Z[0]],\n [Z[2], Z[3], Z[7], Z[6]]]\n\n # plotting sides\n axis.add_collection3d(\n Poly3DCollection(verts,\n facecolors=(0, 1, 1, 0.25),\n linewidths=1,\n edgecolors='darkblue')\n )\n\n verts = np.array([[[0, 0, 0],\n [0, 0, 1],\n [0, 1, 1],\n [0, 1, 0]]])\n verts = verts * shape\n verts += [plane, 0, 0]\n\n axis.add_collection3d(\n Poly3DCollection(verts,\n facecolors='magenta',\n linewidths=1,\n edgecolors='black')\n )\n\n axis.set_xlabel('plane')\n axis.set_ylabel('col')\n axis.set_zlabel('row')\n\n # auto-scale plot axes\n scaling = np.array([getattr(axis, 'get_{}lim'.format(dim))() for dim in 'xyz'])\n axis.auto_scale_xyz(*[[np.min(scaling), np.max(scaling)]] * 3)\n\n return None", "def plot_sag_plane(self, P0=None, sag_pl=None):\n if P0 is None: P0 = np.array([0,0,0])\n if sag_pl is None: sag_pl = self.sp\n norm, d = sag_pl[:3], sag_pl[3]\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n plt.ion()\n # create x,y\n xypts = 10\n xrng = 300\n yrng = 130\n xrng_mesh = np.linspace(P0[0], P0[0]-xrng, xypts)\n yrng_mesh = np.linspace(P0[1]-yrng/2., P0[1]+yrng, xypts)\n xx, yy = np.meshgrid(xrng_mesh, yrng_mesh)\n # calculate corresponding z\n zz = -1 * (norm[0] * xx + norm[1] * yy + d) / norm[2]\n # plot the surface\n self.fig = plt.figure()\n self.fig_ax = self.fig.add_subplot(111, projection='3d')\n self.fig_ax.plot_wireframe(xx, yy, zz, color='gray')\n #ax.quiver(P0[0], P0[1], norm[0], norm[1])\n self.fig_ax.set_xlabel('X')\n self.fig_ax.set_ylabel('Y')\n self.fig_ax.set_zlabel('Z')\n self.fig_ax.set_zlim(P0[2]-xrng, P0[2]+yrng)\n plt.show()", "def view(config_file):\n import open3d as o3d\n with open(config_file) as f:\n config = json.load(f)\n scenes = get_realsense_scenes(config['realsense_dir'])\n for scene in scenes:\n # if scene['scene_name'] != \"Scene_004\":\n # continue\n scene_data = get_data_from_scene(scene)\n logger.info(\"Visualizing - %s\", scene['scene_name'])\n pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(scene_data['points3d']))\n o3d.visualization.draw_geometries_with_editing([pcd])\n pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(scene_data['points3d_segmented']))\n o3d.visualization.draw_geometries([pcd])", "def show(self):\n from matplotlib import pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n\n fig = 
plt.figure()\n ax = Axes3D(fig)\n pos = self.cluster.get_positions()\n from itertools import combinations\n for tri in self.mesh.simplices:\n for comb in combinations(tri, 2):\n x1 = pos[comb[0], 0]\n x2 = pos[comb[1], 0]\n y1 = pos[comb[0], 1]\n y2 = pos[comb[1], 1]\n z1 = pos[comb[0], 2]\n z2 = pos[comb[1], 2]\n ax.plot([x1, x2], [y1, y2], zs=[z1, z2], color=\"black\")\n plt.show()", "def layer_show_3D(layers, width, accuracys, title, path):\n fig = plt.figure(dpi=120, figsize=(8, 6))\n ax = Axes3D(fig)\n fit = inp.interp2d(layers, width, accuracys)\n y_n = np.linspace(min(layers), max(layers), 5120)\n x_n = np.linspace(min(width), max(width), 5120)\n epoches_n = fit(y_n, x_n)\n surf = ax.plot_surface(y_n, x_n, epoches_n, cmap=cm.rainbow)\n # plt.title(title)\n ax.set_xlabel('layers number')\n ax.set_ylabel('kernel width')\n ax.set_zlabel('accuracy')\n fig.colorbar(surf, shrink=0.5, aspect=5)\n # plt.tight_layout()\n plt.savefig(path)", "def full_3d(self, quantity):\n # The data just tells you what integer grid point you are on. Not what actual x,y coordinate you\n # are at\n x = np.arange(0, self.period, self.dx)\n y = np.arange(0, self.period, self.dy)\n z = np.arange(0, self.height + self.dz, self.dz)\n points = np.array(list(itertools.product(z, x, y)))\n # Get the scalar\n scalar = self.get_scalar_quantity(quantity)\n labels = ('X [um]', 'Y [um]', 'Z [um]', quantity)\n # Now plot!\n self.scatter3d(points[:, 1], points[:, 2], points[\n :, 0], scalar.flatten(), labels, 'full_3d')", "def WritePlane(self):\n if not self.__train:\n print('ERROR: Must use Train before WritePlane')\n sys.exit(-1)\n if not self.__openPlaneO:\n print('ERROR: Must use OpenPlaneO before WritePlane')\n sys.exit(-1)\n\n # Defines angular dimensions\n self.__nc_RSoft_O.createDimension('type', self.__n_type)\n\n # Defines variables\n if self.__containsRadial:\n rad_plane_id = self.__nc_RSoft_O.createVariable(\\\n 'radial_plane', 'f4', \\\n ('type','radial_structure_functions'))\n rad_plane_id[:] = self.radial_plane\n if self.__containsAngular:\n ang_plane_id = self.__nc_RSoft_O.createVariable(\\\n 'angular_plane', 'f4', \\\n ('type','angular_structure_functions'))\n ang_plane_id[:] = self.angular_plane\n intercept_id_O = self.__nc_RSoft_O.createVariable(\\\n 'intercept', 'f4', ('type'))\n intercept_id_O[:] = self.intercept", "def plot_phase_plane_trajectory_3d(self , x_axis=0, y_axis=1, z_axis=2):\n \n # Check is trajectory is already computed\n if self.traj == None:\n self.compute_trajectory()\n \n plotter = self.get_plotter()\n \n return plotter.phase_plane_trajectory_3d( \n self.traj, x_axis , y_axis, z_axis)", "def view(input_file, is_3d, plane, backend, realistic_diameters):\n # pylint: disable=import-outside-toplevel\n is_matplotlib = backend == 'matplotlib'\n if is_matplotlib:\n if is_3d:\n _, ax = matplotlib_utils.get_figure(params={'projection': '3d'})\n plot = partial(matplotlib_impl.plot_morph3d, ax=ax)\n else:\n _, ax = matplotlib_utils.get_figure()\n plot = partial(matplotlib_impl.plot_morph, ax=ax,\n plane=plane, realistic_diameters=realistic_diameters)\n else:\n from neurom.view import plotly_impl\n if is_3d:\n plot = plotly_impl.plot_morph3d\n else:\n plot = partial(plotly_impl.plot_morph, plane=plane)\n\n plot(load_morphology(input_file))\n if is_matplotlib:\n if not is_3d:\n plt.axis('equal')\n plt.show()", "def get_plane(self, quantity, plane, pval):\n\n self.log.info('Retrieving plane for %s', quantity)\n scalar = self.get_scalar_quantity(quantity)\n if plane == 'yz' or plane == 'zy':\n # z 
along rows, y along columns\n return scalar[:, pval, :]\n elif plane == 'xz' or plane == 'zx':\n # x along columns, z along rows\n return scalar[:, :, pval]\n elif plane == 'xy' or plane == 'yx':\n # x along rows, y along columns\n return scalar[pval, :, :]", "def plot3d(data_x, data_y, data_z, vol):\n fig = go.Figure(\n data = [\n go.Mesh3d(\n x = data_x,\n y = data_y,\n z = data_z,\n i = [7, 0, 0, 0, 4, 4, 6, 6, 4, 0, 3, 2], # These are needed, numbers from documentation\n j = [3, 4, 1, 2, 5, 6, 5, 2, 0, 1, 6, 3],\n k = [0, 7, 2, 3, 6, 7, 1, 1, 5, 5, 7, 6],\n colorscale=[[0, 'darkblue'],\n [0.5, 'lightskyblue'],\n [1, 'darkblue']],\n intensity = np.linspace(0, 1, 8, endpoint=True),\n showscale=False,\n opacity = 0.6\n )\n ],\n layout = go.Layout(\n title = \"Le volume est: \" + str(vol),\n autosize = True\n )\n )\n\n # This prints it\n pyo.iplot(fig, filename='Determinant-Volume')", "def draw_plane(env, transform, extents=(4,4), texture=None):\n if texture is None:\n texture = np.zeros((100,100,4))\n texture[:,:,1] = 0.2\n texture[:,:,2] = 0.2\n texture[:,:,3] = 0.2\n with env:\n h = env.drawplane(transform, extents=extents, texture=texture)\n return h" ]
[ "0.6780227", "0.666365", "0.6577502", "0.6563549", "0.63409144", "0.624834", "0.6219757", "0.6199828", "0.6173089", "0.6126187", "0.6115501", "0.6100461", "0.6099733", "0.6040154", "0.6024716", "0.6006487", "0.5987519", "0.59277785", "0.5921929", "0.5920941", "0.5902615", "0.5893766", "0.5876073", "0.58410853", "0.5828672", "0.5816446", "0.5815895", "0.5786157", "0.5762912", "0.57516676" ]
0.67553663
1
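The show_plane document above only decorates a Matplotlib axis, so it composes with any figure layout. A minimal driver on a synthetic volume, with show_plane repeated from the row for self-containedness; the array shape and plane index are arbitrary assumptions:

    import numpy as np
    import matplotlib.pyplot as plt

    def show_plane(axis, plane, cmap="gray", title=None):
        axis.imshow(plane, cmap=cmap)
        axis.set_xticks([])
        axis.set_yticks([])
        if title:
            axis.set_title(title)

    volume = np.random.rand(60, 128, 128)  # synthetic (plane, row, col) data
    fig, axis = plt.subplots()
    show_plane(axis, volume[30], title="Plane 30")
    plt.show()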
Allows exploring 2D slices in 3D data.
def slice_explorer(data, cmap='gray'):
    data_len = len(data)

    @interact(plane=(0, data_len-1), continuous_update=False)
    def display_slice(plane=data_len/2):
        fig, axis = plt.subplots(figsize=(20, 7))
        axis_3d = fig.add_subplot(133, projection='3d')
        show_plane(axis, data[plane], title='Plane {}'.format(plane), cmap=cmap)
        slice_in_3d(axis=axis_3d, shape=data.shape, plane=plane)
        plt.show()

    return display_slice
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def take_slice(img_3D, view):\n input_type = isinstance(img_3D, np.ndarray)\n if input_type:\n img_3D = [img_3D]\n img_shape = img_3D[0].shape\n if view == \"sag\":\n slice_pos = np.random.randint(int(0.2 * img_shape[0]), int(0.8 * img_shape[0]))\n imgs_2D = [imgg_3D[slice_pos, :, :] for imgg_3D in img_3D]\n elif view == \"cor\":\n slice_pos = np.random.randint(int(0.2 * img_shape[1]), int(0.8 * img_shape[1]))\n imgs_2D = [imgg_3D[:, slice_pos, :] for imgg_3D in img_3D]\n else:\n slice_pos = np.random.randint(int(0.2 * img_shape[2]), int(0.8 * img_shape[2]))\n imgs_2D = [imgg_3D[:, :, slice_pos] for imgg_3D in img_3D]\n # img_2D = np.expand_dims(img_2D, 2)\n if input_type:\n return imgs_2D[0]\n return imgs_2D", "def __getitem__( self, index ):\n \n # check dimensions - this rules out the ellisis (...) for the moment\n if len( index ) != 3:\n raise ValueError(\"This is a three-dimensional object, please index accordingly.\")\n\n # get involved file numbers and steps\n # if index[0] is a single number (not iterable, not a slice), make a list of it\n if not hasattr( index[0], '__iter__') and not isinstance( index[0], slice ):\n valid_steps = self._valid_steps[ [index[0]], :2 ]\n else:\n valid_steps = self._valid_steps[ index[0], :2 ]\n \n # if image should be cropped make sure that slices stay slices (is about 30% faster)\n if self._cropped: \n if isinstance( index[1], slice ):\n a = self._ymin if index[1].start is None else index[1].start+self._ymin\n b = self._ymax if index[1].stop is None else index[1].stop+self._ymin \n internal_index1 = slice( a, b, index[1].step )\n \n else:\n internal_index1 = np.arange( self._ymin, self._ymax )[ index[1] ]\n \n if isinstance( index[2], slice ):\n a = self._xmin if index[2].start is None else index[2].start+self._xmin\n b = self._xmax if index[2].stop is None else index[2].stop+self._xmin \n internal_index2 = slice( a, b, index[2].step )\n \n else:\n internal_index2 = np.arange( self._xmin, self._xmax )[ index[2] ]\n\n else:\n internal_index1 = index[1]\n internal_index2 = index[2]\n\n # get all data slices and concatenate them (always use data interface here, regardless of whether memory mapping is used or not)\n slices = []\n for file_no in np.unique( valid_steps[:,0] ):\n file = ir.file_hub.open( self._files[file_no] )\n slices.append( file[ self._selected_ext ].data[ valid_steps[ valid_steps[:,0] == file_no, 1 ], internal_index1, internal_index2 ] )\n slices = np.vstack( slices )\n\n # remove first dimension if there is only one slice\n if slices.shape[0] == 1 and slices.ndim == 3:\n slices = slices[0,:,:]\n \n return slices", "def show_slice(file_path, x, y, z):\n img = nib.load(file_path)\n img = img.get_fdata()\n print(\"The scan has dimensions {}.\".format(img.shape))\n slice_0 = img[x, :, :]\n slice_1 = img[:, y, :]\n slice_2 = img[:, :, z]\n slices = [slice_0, slice_1, slice_2]\n fig, axes = plt.subplots(1, len(slices), figsize=[12, 4])\n for i, sli in enumerate(slices):\n axes[i].imshow(sli.T, cmap=\"gray\", origin=\"lower\")", "def _getitem3d(self, index):\n\n lovects = self._getlovects()\n hivects = self._gethivects()\n fields = self._getfields()\n\n ix = index[0]\n iy = index[1]\n iz = index[2]\n\n if len(fields[0].shape) > self.dim:\n ncomps = fields[0].shape[-1]\n else:\n ncomps = 1\n\n if len(index) > self.dim:\n if ncomps > 1:\n ic = index[-1]\n else:\n raise Exception('Too many indices given')\n else:\n ic = None\n\n nx = hivects[0,:].max() - self.nghosts\n ny = hivects[1,:].max() - self.nghosts\n nz = hivects[2,:].max() - 
self.nghosts\n\n if npes > 1:\n nx = comm_world.allreduce(nx, op=mpi.MAX)\n ny = comm_world.allreduce(ny, op=mpi.MAX)\n nz = comm_world.allreduce(nz, op=mpi.MAX)\n\n if isinstance(ix, slice):\n ixstart = max(ix.start or -self.nghosts, -self.nghosts)\n ixstop = min(ix.stop or nx + 1 + self.nghosts, nx + self.overlaps[0] + self.nghosts)\n else:\n ixstart = ix\n ixstop = ix + 1\n if isinstance(iy, slice):\n iystart = max(iy.start or -self.nghosts, -self.nghosts)\n iystop = min(iy.stop or ny + 1 + self.nghosts, ny + self.overlaps[1] + self.nghosts)\n else:\n iystart = iy\n iystop = iy + 1\n if isinstance(iz, slice):\n izstart = max(iz.start or -self.nghosts, -self.nghosts)\n izstop = min(iz.stop or nz + 1 + self.nghosts, nz + self.overlaps[2] + self.nghosts)\n else:\n izstart = iz\n izstop = iz + 1\n\n # --- Setup the size of the array to be returned and create it.\n # --- Space is added for multiple components if needed.\n sss = (max(0, ixstop - ixstart),\n max(0, iystop - iystart),\n max(0, izstop - izstart))\n if ncomps > 1 and ic is None:\n sss = tuple(list(sss) + [ncomps])\n resultglobal = np.zeros(sss, dtype=_libwarpx._numpy_real_dtype)\n\n datalist = []\n for i in range(len(fields)):\n\n # --- The ix1, 2 etc are relative to global indexing\n ix1 = max(ixstart, lovects[0,i])\n ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])\n iy1 = max(iystart, lovects[1,i])\n iy2 = min(iystop, lovects[1,i] + fields[i].shape[1])\n iz1 = max(izstart, lovects[2,i])\n iz2 = min(izstop, lovects[2,i] + fields[i].shape[2])\n\n if ix1 < ix2 and iy1 < iy2 and iz1 < iz2:\n\n sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),\n slice(iy1 - lovects[1,i], iy2 - lovects[1,i]),\n slice(iz1 - lovects[2,i], iz2 - lovects[2,i]))\n if ic is not None:\n sss = tuple(list(sss) + [ic])\n\n vslice = (slice(ix1 - ixstart, ix2 - ixstart),\n slice(iy1 - iystart, iy2 - iystart),\n slice(iz1 - izstart, iz2 - izstart))\n\n datalist.append((vslice, fields[i][sss]))\n\n if npes == 1:\n all_datalist = [datalist]\n else:\n all_datalist = comm_world.allgather(datalist)\n\n for datalist in all_datalist:\n for vslice, ff in datalist:\n resultglobal[vslice] = ff\n\n # --- Now remove any of the reduced dimensions.\n sss = [slice(None), slice(None), slice(None)]\n if not isinstance(ix, slice):\n sss[0] = 0\n if not isinstance(iy, slice):\n sss[1] = 0\n if not isinstance(iz, slice):\n sss[2] = 0\n\n return resultglobal[tuple(sss)]", "def __getslice__(self, *args):\n return _itkSurfaceSpatialObjectPointPython.vectoritkSurfaceSpatialObjectPoint3___getslice__(self, *args)", "def surf_2d_slice(data):\n X = np.array(data['x'])\n Y = np.array(data['y'])\n Z = np.array(data['z'])\n\n Xgrid, Ygrid = np.meshgrid(X,Y)\n zlevel = Z[0]\n\n Tslice = np.array(data['T'][0,:, :])\n\n fig, ax = plt.subplots()\n\n # Tslice = np.ma.masked_less_equal(Tslice, 11.001)\n\n pcolor = ax.pcolormesh(Xgrid, Ygrid, Tslice, vmin = 11, vmax = 13.5, cmap = 'Blues')\n\n ax.set_xlabel('x [km]')\n ax.set_ylabel('y [km]')\n\n ax.set_title(\"T:%s at z = %s\" % (os.path.split(data.filepath())[1], zlevel))\n\n\n fig.colorbar(pcolor)\n\n fig.savefig('MITgcmpyvis.png',dpi = 500)", "def viz(self,slices):\n #layers_copy = deepcopy(self.layers)\n self.layers_copy = self.layers\n imgs = [torch.zeros([1,3,self.N_in,self.N_in])]\n \n for layer in self.layers:\n if isinstance(layer,nn.Conv2d):\n layer2 = nn.Conv2d(3,3,layer.kernel_size,layer.stride,layer.padding)\n imgs.append(layer2(imgs[-1]))\n else:\n imgs.append(layer(imgs[-1]))\n \n assert(len(self.projs) == len(imgs)-1)\n for 
proj,img in zip(self.projs[::-1],imgs[::-1]):\n (x1,x2),(y1,y2) = slices\n img[0,:,x1:x2+1,y1:y2+1] = 255\n slices = proj(slices)\n (x1,x2),(y1,y2) = slices\n imgs[0][0,:,x1:x2+1,y1:y2+1] = 255\n \n dim = int(np.floor(np.sqrt(len(self.layers))))+1\n fig,axes = plt.subplots(dim,dim,figsize=(10,10))\n for i,img in enumerate(imgs):\n a,b = np.unravel_index(i,(dim,dim))\n axes[a,b].imshow((img[0].detach().permute(1,2,0).numpy()).astype(np.uint8))\n axes[a,b].set_title(str(i))", "def print_image_slice_3d(input, num=0,direction=\"z\"):\n\t#print \"print slice at 3 directions\"\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tif(direction==\"x\"):\n\t\t#print \"xxxxx\"\n\t\tix=num\n\t\tprint \"(x = %d slice)\" % (ix)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor iy in xrange(ny):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((iy + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(ny%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)\n\telif(direction==\"y\"):\n\t\t#print \"yyy\"\n\t\tiy=num\n\t\tprint \"(y = %d slice)\" % (iy)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)\n\telse:\n\t\t#print \"zzzz\"\n\t\tiz=num\n\t\tprint \"(z = %d slice)\" % (iz)\n\t\tline = []\n\t\tfor iy in xrange(ny-1,-1,-1):\n\t\t\tline.append(\"Row \")\n\t\t\tline.append(\"%4i \" % iy)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)", "def show_current_pair_by_3d_slice(iS,iT):\n import matplotlib.pyplot as plt\n import easyreg.viewers as viewers\n fig, ax = plt.subplots(2,3)\n plt.setp(plt.gcf(), 'facecolor', 'white')\n plt.style.use('bmh')\n\n ivsx = viewers.ImageViewer3D_Sliced(ax[0][0], iS, 0, 'source X', True)\n ivsy = viewers.ImageViewer3D_Sliced(ax[0][1], iS, 1, 'source Y', True)\n ivsz = viewers.ImageViewer3D_Sliced(ax[0][2], iS, 2, 'source Z', True)\n\n ivtx = viewers.ImageViewer3D_Sliced(ax[1][0], iT, 0, 'target X', True)\n ivty = viewers.ImageViewer3D_Sliced(ax[1][1], iT, 1, 'target Y', True)\n ivtz = viewers.ImageViewer3D_Sliced(ax[1][2], iT, 2, 'target Z', True)\n\n\n feh = viewers.FigureEventHandler(fig)\n feh.add_axes_event('button_press_event', ax[0][0], ivsx.on_mouse_press, ivsx.get_synchronize, ivsx.set_synchronize)\n feh.add_axes_event('button_press_event', ax[0][1], ivsy.on_mouse_press, ivsy.get_synchronize, ivsy.set_synchronize)\n feh.add_axes_event('button_press_event', ax[0][2], ivsz.on_mouse_press, ivsz.get_synchronize, ivsz.set_synchronize)\n\n feh.add_axes_event('button_press_event', ax[1][0], ivtx.on_mouse_press, ivtx.get_synchronize, ivtx.set_synchronize)\n feh.add_axes_event('button_press_event', ax[1][1], ivty.on_mouse_press, ivty.get_synchronize, ivty.set_synchronize)\n feh.add_axes_event('button_press_event', ax[1][2], ivtz.on_mouse_press, ivtz.get_synchronize, 
ivtz.set_synchronize)\n\n feh.synchronize([ax[0][0], ax[1][0]])\n feh.synchronize([ax[0][1], ax[1][1]])\n feh.synchronize([ax[0][2], ax[1][2]])", "def readpil3d(self):\r\n\r\n # Read the data in as an array.\r\n res = np.loadtxt(self.name, delimiter=' ')\r\n\r\n # Split into useful chunks\r\n self.pos = res[:, 0:3] # Grid point locations\r\n self.Pn = res[:, 3:4] # Normal pressure [Pa]\r\n self.flux = res[:, -1] # Flux\r", "def test_3d_time():\n dic,data = ng.pipe.read(\"common_data/3d_pipe/data/test%03d.fid\")\n sdic,sdata = ng.pipe.read(\"common_data/3d_pipe/data/test001.fid\")\n assert data.shape == (128, 88, 1250)\n assert data.dtype == 'complex64'\n assert round(data[0,1,2].real,2) == -7.98\n assert round(data[0,1,2].imag,2) == 33.82\n assert round(data[10,22,5].real,2) == 15.71\n assert round(data[10,22,5].imag,2) == 15.1\n\n # and the first slice\n assert sdata.shape == (88, 1250)\n assert sdata.dtype == 'complex64'\n assert round(sdata[1,2].real,2) == -7.98\n assert round(sdata[1,2].imag,2) == 33.82\n assert round(sdata[22,5].real,2) == 22.65\n assert round(sdata[22,5].imag,2) == 13.65\n\n # slice/data matching\n assert_array_equal(data[0],sdata)\n\n write_readback_3D(dic,data)", "def _setitem3d(self, index, value):\n ix = index[0]\n iy = index[1]\n iz = index[2]\n\n lovects = self._getlovects()\n hivects = self._gethivects()\n fields = self._getfields()\n\n if len(index) > self.dim:\n if ncomps > 1:\n ic = index[-1]\n else:\n raise Exception('Too many indices given')\n else:\n ic = None\n\n nx = hivects[0,:].max() - self.nghosts\n ny = hivects[1,:].max() - self.nghosts\n nz = hivects[2,:].max() - self.nghosts\n\n # --- Add extra dimensions so that the input has the same number of\n # --- dimensions as array.\n if isinstance(value, np.ndarray):\n value3d = np.array(value, copy=False)\n sss = list(value3d.shape)\n if not isinstance(ix, slice): sss[0:0] = [1]\n if not isinstance(iy, slice): sss[1:1] = [1]\n if not isinstance(iz, slice): sss[2:2] = [1]\n value3d.shape = sss\n\n if isinstance(ix, slice):\n ixstart = max(ix.start or -self.nghosts, -self.nghosts)\n ixstop = min(ix.stop or nx + 1 + self.nghosts, nx + self.overlaps[0] + self.nghosts)\n else:\n ixstart = ix\n ixstop = ix + 1\n if isinstance(iy, slice):\n iystart = max(iy.start or -self.nghosts, -self.nghosts)\n iystop = min(iy.stop or ny + 1 + self.nghosts, ny + self.overlaps[1] + self.nghosts)\n else:\n iystart = iy\n iystop = iy + 1\n if isinstance(iz, slice):\n izstart = max(iz.start or -self.nghosts, -self.nghosts)\n izstop = min(iz.stop or nz + 1 + self.nghosts, nz + self.overlaps[2] + self.nghosts)\n else:\n izstart = iz\n izstop = iz + 1\n\n for i in range(len(fields)):\n\n # --- The ix1, 2 etc are relative to global indexing\n ix1 = max(ixstart, lovects[0,i])\n ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])\n iy1 = max(iystart, lovects[1,i])\n iy2 = min(iystop, lovects[1,i] + fields[i].shape[1])\n iz1 = max(izstart, lovects[2,i])\n iz2 = min(izstop, lovects[2,i] + fields[i].shape[2])\n\n if ix1 < ix2 and iy1 < iy2 and iz1 < iz2:\n\n sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),\n slice(iy1 - lovects[1,i], iy2 - lovects[1,i]),\n slice(iz1 - lovects[2,i], iz2 - lovects[2,i]))\n if ic is not None:\n sss = tuple(list(sss) + [ic])\n\n if isinstance(value, np.ndarray):\n vslice = (slice(ix1 - ixstart, ix2 - ixstart),\n slice(iy1 - iystart, iy2 - iystart),\n slice(iz1 - izstart, iz2 - izstart))\n fields[i][sss] = value3d[vslice]\n else:\n fields[i][sss] = value", "def get2DSlice( self, slices: list ):\n 
assert(len(slices)==self._nDims-2)\n slices.extend([slice(self._nGlobalCoords[self._layout.dims_order[-2]]),\n slice(self._nGlobalCoords[self._layout.dims_order[-1]])])\n return self._f[tuple(slices)]", "def get_slice(P1, P2, name):\n \n centre_dist = distance_3D(P1, P2)\n plot_img = np.zeros((ceil(centre_dist / 2. + 1), centre_dist + 2 ))\n Xrange = np.arange(-centre_dist / 4., centre_dist / 4. + 1)\n \n # time goes along the vector between P1 and P2\n # since it might be at an angle, I can't loop in 1\n # pixel increments - this will miss certain slices. Therefore,\n # I need to loop through by 1/cosA, where A is angle between\n # the xy plane and vector P1->P2\n sampling = sample_rate(P1, P2)\n \n for time in np.linspace(0, centre_dist + 1,\n centre_dist * sampling):\n # Go up along the line\n new_pt = vector_3D(P1, P2, time)\n old_pt = vector_3D(P1, P2, time - centre_dist / 2. * sampling)\n\n if time == 0:\n input_file = name % int(round(new_pt[2], 0))\n img = io.imread(input_file)\n \n # Check if the previous slice is the same as the next\n # don't load it again if it is - save computation time\n if int(round(new_pt[2], 0)) != int(round(old_pt[2], 0)):\n \n input_file = name % int(round(new_pt[2], 0))\n img = io.imread(input_file)\n \n for X in Xrange:\n \n # Get along the X direction for every height\n x, y, z = vector_perpendicular_3D(new_pt, P2, 1, 0, X)\n \n pixel_value = interpolation(x, y, img)\n \n plot_img[X + centre_dist / 4., time] = pixel_value\n else:\n for X in Xrange:\n \n # Get along the X direction for every height\n x, y, z = vector_perpendicular_3D(new_pt, P2, 1, 0, X)\n\n pixel_value = interpolation(x, y, img)\n \n plot_img[X + centre_dist / 4., time] = pixel_value\n \n return plot_img", "def __getitem__(self, i):\n item_out = super(Vector3Array, self).__getitem__(i)\n if np.isscalar(i):\n return item_out.view(Vector3)\n if isinstance(i, slice):\n return item_out\n return item_out.view(np.ndarray)", "def __getslice__(self, *args):\n return _itkLineSpatialObjectPointPython.vectoritkLineSpatialObjectPoint3___getslice__(self, *args)", "def test_get_second_slice(self):\n self.init()\n assert np.all(get_second_slice(self.i64_3) == self.i64_3[:,:,1])\n assert np.all(get_second_slice(self.fi64_3) == self.fi64_3[:,:,1])\n assert np.all(get_second_slice(self.f64_3) == self.f64_3[:,:,1])\n assert np.all(get_second_slice(self.ff64_3) == self.ff64_3[:,:,1])\n assert get_second_slice(self.i64_3).shape == (3,3)\n assert get_second_slice(self.fi64_3).shape == (3,3)\n assert get_second_slice(self.f64_3).shape == (3,3)\n assert get_second_slice(self.ff64_3).shape == (3,3)\n assert get_second_slice(self.i64_3).dtype == 'float64'\n assert get_second_slice(self.fi64_3).dtype == 'float64'\n assert get_second_slice(self.f64_3).dtype == 'float64'\n assert get_second_slice(self.ff64_3).dtype == 'float64'\n assert get_second_slice(self.i64_3).flags['F_CONTIGUOUS'] == True\n assert get_second_slice(self.fi64_3).flags['F_CONTIGUOUS'] == True\n assert get_second_slice(self.f64_3).flags['F_CONTIGUOUS'] == True\n assert get_second_slice(self.ff64_3).flags['F_CONTIGUOUS'] == True", "def mesh_slice(V,n,X,Y,Z):\n from matplotlib import cm\n import mpl_toolkits.mplot3d.axes3d as p3\n import time\n order=np.array([(1,2,0),(2,0,1),(0,1,2)])\n q=np.transpose(V,(order[n])) # See projection for why we could also use take instead.\n if n==0: # Make a less cumbersome and more consistent version of this?\n i,j=X,Y\n i,j=np.array([i]),np.array([j]).T\n I,J=i,j\n for m in range(j.shape[0]-1): # -1 because we 
already have the first row as I.\n I=np.vstack((I,i))\n for m in range(i.shape[1]-1):\n J=np.hstack((J,j))\n if n==1:\n i,j=Y,Z\n i,j=np.array([i]),np.array([j]).T\n I,J=i,j\n for m in range(j.shape[0]-1): # -1 because we already have the first row as I.\n I=np.vstack((I,i))\n for m in range(i.shape[1]-1):\n J=np.hstack((J,j))\n if n==2:\n i,j=Z,X\n i,j=np.array([i]),np.array([j]).T\n I,J=i,j\n for m in range(j.shape[0]-1): # -1 because we already have the first row as I.\n I=np.vstack((I,i))\n for m in range(i.shape[1]-1):\n J=np.hstack((J,j))\n labels={\n 0:('horizontal axial (mm)','height (mm)'),\n 1:('horizontal radial (mm)','horizontal axial (mm)'),\n 2:('height (mm)','horizontal radial (mm)')\n } \n class animated(object): # 4D, plots f(x,y,z0) specific to mesh_slice.\n def __init__(self,I,J,q):\n self.fig = plt.figure()\n self.ax = self.fig.add_subplot(111, projection='3d')\n self.I,self.J=I,J\n self.q=q[:,0,:]\n self.surf=self.ax.plot_surface(self.J,self.I,self.q,cmap=cm.coolwarm,antialiased=False)\n def drawNow(self,ii,q,n):\n self.surf.remove()\n self.slc=q[:,ii,:]\n self.surf=self.ax.plot_surface(self.J,self.I,self.slc,cmap=cm.coolwarm,antialiased=False)\n plt.ylabel(labels[n][1])\n plt.xlabel(labels[n][0])\n #plt.title(ii) #Optional: this moves down during animation.\n plt.draw() # redraw the canvas\n time.sleep(0.01)\n self.fig.show()\n anim=animated(I,J,q)\n for ii in range(q.shape[1]):\n if ii==q.shape[1]-1:\n plt.title('Animation complete.')\n anim.drawNow(ii,q,n)\n return plt.show()", "def slice_dims(data_array: sc.DataArray, slices: Dict[str, slice]) -> sc.DataArray:\n out = data_array\n for dim, sl in slices.items():\n out = out[dim, sl]\n return out", "def input_data_slice(model, Green_slice=1):\n\n # Define axis limit\n x1min = np.min(model.x_G)\n x1max = np.max(model.x_G)\n x2min = np.min(model.y_G)\n x2max = np.max(model.y_G)\n x1_middle = 0.5 * (x1max + x1min)\n x2_middle = 0.5 * (x2max + x2min)\n\n # Compute the input data for the selected slice\n if Green_slice == 1:\n X_G, Y_G = np.meshgrid(model.x_G, model.y_G)\n x1 = X_G.flatten()[:, None]\n x2 = Y_G.flatten()[:, None]\n y1 = x1_middle * np.ones(x1.shape)\n y2 = x2_middle * np.ones(x2.shape)\n axis_limit = [x1min, x1max, x2min, x2max]\n axis_labels = ['$x_1$', '$x_2$',\n '$(x_1,x_2,%.1f,%.1f)$' % (x1_middle, x2_middle)]\n\n elif Green_slice == 2:\n X_G, Y_G = np.meshgrid(model.x_G, model.x_G)\n x1 = X_G.flatten()[:, None]\n y1 = Y_G.flatten()[:, None]\n x2 = x2_middle * np.ones(x1.shape)\n y2 = x2_middle * np.ones(y1.shape)\n axis_limit = [x1min, x1max, x1min, x1max]\n axis_labels = ['$x_1$', '$y_1$',\n '$(x_1,%.1f,y_1,%.1f)$' % (x2_middle, x2_middle)]\n\n elif Green_slice == 3:\n X_G, Y_G = np.meshgrid(model.y_G, model.y_G)\n x2 = X_G.flatten()[:, None]\n y2 = Y_G.flatten()[:, None]\n x1 = x1_middle * np.ones(x2.shape)\n y1 = x1_middle * np.ones(y2.shape)\n axis_limit = [x2min, x2max, x2min, x2max]\n axis_labels = ['$x_2$', '$y_2$',\n '$(%.1f,x_2,%.1f,y_2)$' % (x1_middle, x1_middle)]\n\n elif Green_slice == 4:\n X_G, Y_G = np.meshgrid(model.x_G, model.y_G)\n y1 = X_G.flatten()[:, None]\n y2 = Y_G.flatten()[:, None]\n x1 = x1_middle * np.ones(y1.shape)\n x2 = x2_middle * np.ones(y2.shape)\n axis_limit = [x1min, x1max, x2min, x2max]\n axis_labels = ['$y_1$', '$y_2$',\n '$(%.1f,%.1f,y_1,y_2)$' % (x1_middle, x2_middle)]\n\n else:\n raise ValueError(\n \"Function not implemented for selected Green slice argument.\")\n\n # Define input data and shape of the Green's function\n input_data = np.concatenate(\n (x1, x2, 
y1, y2), 1).astype(dtype=config.real(np))\n shape_Green = X_G.shape\n return input_data, shape_Green, axis_limit, axis_labels", "def test_3d_freq():\n dic,data = ng.pipe.read(\"common_data/3d_pipe/ft/test%03d.ft3\")\n sdic,sdata = ng.pipe.read(\"common_data/3d_pipe/ft/test001.ft3\")\n\n assert data.shape == (128, 128, 4096)\n assert data.dtype == 'float32'\n assert round(data[0,1,2],2) == 25980.13\n assert round(data[10,22,5],2) == 1561.09\n check_ppm_limits(dic,data,0,[78.10, 34.24])\n check_ppm_limits(dic,data,1,[147.42, 93.01])\n check_ppm_limits(dic,data,2,[254.92, -142.83])\n\n # and the first slice\n assert sdata.shape == (128, 4096)\n assert sdata.dtype == 'float32'\n assert round(sdata[1,2],2) == 25980.13\n assert round(sdata[22,5],2) == -8336.05\n check_ppm_limits(sdic,sdata,0,[147.42, 93.01])\n check_ppm_limits(sdic,sdata,1,[254.92, -142.83])\n\n # slice/data matching\n assert_array_equal(data[0],sdata)\n\n write_readback_3D(dic,data)", "def __getslice__(self, *args):\n return _itkSurfaceSpatialObjectPointPython.vectoritkSurfaceSpatialObjectPoint2___getslice__(self, *args)", "def voxel_slice(self,slicePoint,points,triangles,res,llc,sliceProto,direction):\n\t\tdef getDirectionArray(x):\n\t\t\treturn {\n\t\t\t\t0:numpy.array([1,0,0]),\n\t\t\t\t1:numpy.array([0,1,0]),\n\t\t\t\t2:numpy.array([0,0,1]),\n\t\t\t\t}.get(x, numpy.array([0,0,1]))\n\t\t\n\t\tdirectionArray = getDirectionArray(direction)\n\n\t\tcontours=self.find_intersection_contours(points,triangles,slicePoint, directionArray)\n\t\tcontours2=self.find_intersection_contours(points,triangles,slicePoint+directionArray*0.001,directionArray)\n\t\tcontours3=self.find_intersection_contours(points,triangles,slicePoint-directionArray*0.001,directionArray)\n\t\t#result=numpy.array(voxelize_contours(contours,res,llc,sliceProto),dtype='int64')\n\t\t#result2=numpy.array(voxelize_contours(contours2,res,llc,sliceProto),dtype='int64')\n\t\t#result3=numpy.array(voxelize_contours(contours3,res,llc,sliceProto),dtype='int64')\n\t\t#print numpy.sum(result),numpy.sum(result2),numpy.sum(result3)\n\t\t#fixedResult=numpy.zeros(result.shape,dtype='bool')\n\t\t#fixedResult[numpy.nonzero(result+result2+result3>=2)]=True # set to True if the voxel is present in 2 of 3 slices\n\t\tresult=self.voxelize_contours(contours,res,llc,sliceProto, direction)\n\t\tresult2=self.voxelize_contours(contours2,res,llc,sliceProto, direction)\n\t\tresult3=self.voxelize_contours(contours3,res,llc,sliceProto, direction)\n\t\tfixedResult=(result&result2)|(result&result3)|(result3&result2)\n\t\treturn fixedResult", "def slice_in_3d(axis, shape, plane):\n Z = np.array([[0, 0, 0],\n [1, 0, 0],\n [1, 1, 0],\n [0, 1, 0],\n [0, 0, 1],\n [1, 0, 1],\n [1, 1, 1],\n [0, 1, 1]])\n\n Z = Z * shape\n\n r = [-1, 1]\n\n X, Y = np.meshgrid(r, r)\n\n # plotting vertices\n axis.scatter3D(Z[:, 0], Z[:, 1], Z[:, 2])\n\n # list of sides' polygons of figure\n verts = [[Z[0], Z[1], Z[2], Z[3]],\n [Z[4], Z[5], Z[6], Z[7]],\n [Z[0], Z[1], Z[5], Z[4]],\n [Z[2], Z[3], Z[7], Z[6]],\n [Z[1], Z[2], Z[6], Z[5]],\n [Z[4], Z[7], Z[3], Z[0]],\n [Z[2], Z[3], Z[7], Z[6]]]\n\n # plotting sides\n axis.add_collection3d(\n Poly3DCollection(verts,\n facecolors=(0, 1, 1, 0.25),\n linewidths=1,\n edgecolors='darkblue')\n )\n\n verts = np.array([[[0, 0, 0],\n [0, 0, 1],\n [0, 1, 1],\n [0, 1, 0]]])\n verts = verts * shape\n verts += [plane, 0, 0]\n\n axis.add_collection3d(\n Poly3DCollection(verts,\n facecolors='magenta',\n linewidths=1,\n edgecolors='black')\n )\n\n axis.set_xlabel('plane')\n 
axis.set_ylabel('col')\n axis.set_zlabel('row')\n\n # auto-scale plot axes\n scaling = np.array([getattr(axis, 'get_{}lim'.format(dim))() for dim in 'xyz'])\n axis.auto_scale_xyz(*[[np.min(scaling), np.max(scaling)]] * 3)\n\n return None", "def extract_3d(data: np.ndarray, center: np.ndarray, half_size: int):\n # FIXME: Doesn't always return the expected shape\n imax = np.clip(center + half_size + 1, 0, data.shape).astype(np.int)\n imin = np.clip(center - half_size, 0, data.shape).astype(np.int)\n\n subvol = data[imin[0]:imax[0], imin[1]:imax[1], imin[2]:imax[2]]\n\n max_missing = ((center + half_size + 1) - imax).astype(np.int)\n min_missing = (imin - (center - half_size)).astype(np.int)\n\n return np.pad(subvol, [(min_missing[i], max_missing[i]) for i in range(3)], mode='constant')", "def get_2D_slices(atlas: str):\n if atlas == \"avg\":\n AVGT, metaAVGT = nrr.read(\"nrrd/average_template_10.nrrd\")\n folder = \"avg\"\n elif atlas == \"nissl\":\n AVGT, metaAVGT = nrr.read(\"nrrd/ara_nissl_10.nrrd\")\n folder = \"nissl\"\n else:\n raise Exception(\"Wrong argument for var atlas\")\n\n degress = [i for i in range(-15, 16, 3)]\n\n for degree in tqdm(degress):\n rotated = rot(AVGT, angle=degree, mode=\"nearest\", order=0, reshape=True)\n print(degree)\n for plate_no in tqdm(range(300, 1300, 5)):\n save_img(rotated[plate_no], folder=folder, plate_no=plate_no, degree=degree)", "def yz_slice(data, index):\n X = np.array(data['x'])\n Y = np.array(data['y'])\n Z = np.array(data['z'])\n print(Z)\n\n Tslice = np.array(data['T'][:, :, index])\n print(Tslice.shape)\n\n Ygrid, Zgrid = np.meshgrid(Y, Z)\n xlevel = X[index]\n fig, ax = plt.subplots()\n # Tslice = np.ma.masked_less_equal(Tslice, 11.001)\n pcolor = ax.pcolormesh(Ygrid, Zgrid, Tslice, vmin = 11, vmax = 13.5, cmap = 'Blues')\n\n ax.invert_yaxis()\n ax.set_xlabel('y [km]')\n ax.set_ylabel('z [m]')\n ax.set_title(\"T:%s at x = %s\" % (os.path.split(data.filepath())[1], xlevel))\n\n\n fig.colorbar(pcolor)\n\n fig.savefig('MITgcmyz.png',dpi = 500)", "def show_slices(slices):\n fig, axes = plt.subplots(1, len(slices))\n for i, slice in enumerate(slices):\n axes[i].imshow(slice.T, cmap=\"gray\", origin=\"lower\")", "def getslice(self, *args, **kwargs):\n return _image.image_getslice(self, *args, **kwargs)", "def _index(tensor_3d, tensor_2d):\n x, y, z = tensor_3d.size()\n t = tensor_3d.reshape(x * y, z)\n tt = tensor_2d.reshape(x * y)\n v = t[torch.arange(x * y), tt]\n v = v.reshape(x, y)\n return v" ]
[ "0.7107621", "0.6738053", "0.6709929", "0.6706025", "0.6513878", "0.64924455", "0.6423735", "0.63151467", "0.63149804", "0.6307343", "0.62615204", "0.6126598", "0.6090667", "0.607543", "0.6070433", "0.60660744", "0.60097694", "0.6005962", "0.5995597", "0.597353", "0.59621847", "0.5899215", "0.5880683", "0.5849116", "0.5827684", "0.5824759", "0.5808791", "0.58016115", "0.5773312", "0.57586557" ]
0.7134606
0
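The negatives in the record above all orbit one idea: extracting 2D slices from a 3D array and displaying them. As a minimal, self-contained sketch of that pattern (independent of any entry in this dataset; the function name and axis comments are illustrative assumptions, not taken from the snippets above):

import numpy as np
import matplotlib.pyplot as plt

def show_mid_slices(volume):
    # Take the central slice along each of the three axes.
    centers = [s // 2 for s in volume.shape]
    slices = [
        volume[centers[0], :, :],  # first axis held fixed
        volume[:, centers[1], :],  # second axis held fixed
        volume[:, :, centers[2]],  # third axis held fixed
    ]
    fig, axes = plt.subplots(1, len(slices))
    for ax, slc in zip(axes, slices):
        ax.imshow(slc.T, cmap="gray", origin="lower")
        ax.axis("off")
    plt.show()

# Usage: any 3D array works, e.g. a random test volume.
show_mid_slices(np.random.rand(32, 48, 64))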
Generates a 3D surface plot for the specified region.
def plot_3d_surface(data, labels, region=3, spacing=(1.0, 1.0, 1.0)): properties = measure.regionprops(labels, intensity_image=data) # skimage.measure.marching_cubes expects ordering (row, col, plane). # We need to transpose the data: volume = (labels == properties[region].label).transpose(1, 2, 0) verts_px, faces_px, _, _ = measure.marching_cubes_lewiner(volume, level=0, spacing=(1.0, 1.0, 1.0)) surface_area_pixels = measure.mesh_surface_area(verts_px, faces_px) verts_actual, faces_actual, _, _ = measure.marching_cubes_lewiner(volume, level=0, spacing=tuple(spacing)) surface_area_actual = measure.mesh_surface_area(verts_actual, faces_actual) print('Surface area\n') print(' * Total pixels: {:0.2f}'.format(surface_area_pixels)) print(' * Actual: {:0.2f}'.format(surface_area_actual)) fig = plt.figure(figsize=(10, 10)) ax = fig.add_subplot(111, projection='3d') mesh = Poly3DCollection(verts_px[faces_px]) mesh.set_edgecolor('black') ax.add_collection3d(mesh) ax.set_xlabel('col') ax.set_ylabel('row') ax.set_zlabel('plane') min_pln, min_row, min_col, max_pln, max_row, max_col = properties[region].bbox ax.set_xlim(min_row, max_row) ax.set_ylim(min_col, max_col) ax.set_zlim(min_pln, max_pln) plt.tight_layout() plt.show() return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_surface(self):\n X, Y = np.meshgrid(self.x, self.y)\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.plot_surface(X=X, Y=Y, Z=self.z)\n plt.show()", "def plot3d(data):\n assert span1 == span2\n span = span1\n # ---------------------- create the figure and axes ---------------------- #\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n\n # -- discretize the definition space and compute the function's images --- #\n X, Y = discretise_space([defspace1, defspace2], n=span)\n Z = data\n\n # ----------------------- appearance and plotting ------------------------ #\n ax.set_zlim(np.min(Z) - 0.5, np.max(Z) + 0.5)\n ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n ax.set(xlabel='$W\\_C$', ylabel='$W\\_W$', zlabel=\"Utilité\")#,\n # title='Utilité à {} ticks en fonction de W_W et W_C'.format(ticks))\n\n # Plot the surface.\n surf = ax.plot_surface(X, Y, Z, alpha=0.8, #, cmap='binary'\n linewidth=0, antialiased=False, zorder=1)\n\n plt.show()", "def plot3surface( pot, **kwargs ): \n \n fig = plt.figure( figsize = (8., 8.) ) \n gs = matplotlib.gridspec.GridSpec( 3,2, wspace=0.2) \n \n # Make a list with three perpendicular directions which \n # will define the three surface cuts \n perp = [(np.pi/2., 0.), (np.pi/2., -np.pi/2.), (0., -1.*np.pi/2.) ]\n \n # Iterate to plot the three surface cuts\n yMin = 1e16\n yMax = -1e16 \n Ims = []\n for i in range(3):\n ax0 = fig.add_subplot( gs[i,0], projection='3d')\n ax1 = fig.add_subplot( gs[i,1]) \n \n T0, T1, X, Y, Z = surfcut_points( normal = perp[i], \\\n ax0=ax0, **kwargs ) \n \n EVAL = pot.evalpotential(X,Y,Z)\n im = ax1.pcolormesh( T0, T1, EVAL, \\\n cmap=plt.get_cmap('jet') ) \n plt.axes( ax1 ) \n cbar = plt.colorbar(im)\n cbar.set_label( pot.unitlabel, rotation=0) \n \n ymin = EVAL.min()\n ymax = EVAL.max()\n \n Ims.append(im) \n if ymin < yMin : yMin = ymin\n if ymax > yMax : yMax = ymax \n \n for im in Ims:\n im.set_clim( vmin=yMin, vmax=yMax)", "def visualize_in_3d(self,**kwargs):\n fig = plt.figure(figsize=(7,7))\n ax = fig.add_subplot(111, projection='3d')\n\n points = np.vstack([\n c.to_matrix() for c in self.contours if c.inclusion\n ])\n points[:,:2] = points[:,:2] * self.scan.pixel_spacing\n\n # Center the points at the origin for \n # spherical coordinates conversion.\n points = points - points.mean(axis=0)\n\n # Triangulate the azimuth and zenith transformation.\n azimuth = np.arctan2(points[:,1],points[:,0])\n zenith = np.arccos(points[:,2] / np.linalg.norm(points,axis=1))\n azi_zen = np.c_[azimuth.flatten(),zenith.flatten()]\n triangles = Delaunay(azi_zen).simplices\n\n # Start the points at 0 on every axis.\n # This lets the axis ticks to be interpreted as length in mm.\n points = points - points.min(axis=0)\n\n ax.set_xlabel('length (mm)')\n ax.set_ylabel('length (mm)')\n ax.set_zlabel('length (mm)')\n\n # Plot the points.\n ax.plot_trisurf(points[:,0], points[:,1], points[:,2],\n triangles=triangles, **kwargs)\n plt.show()", "def plot_surface_3D(self, length = 30, fps = 30, **kwargs):\n fig = utils.get_figure(scale = 3)\n ax = fig.add_subplot(111, projection = '3d')\n\n # surface_x = self.xi_1_mesh\n # surface_y = self.xi_2_mesh\n # surface_x, surface_y, surface_z = self.surface()\n xyz = self.surface()\n\n # surface_x, surface_y = np.meshgrid(surface_x, surface_y)\n\n # print(np.shape(surface_x))\n # print(np.shape(surface_y))\n # print(np.shape(surface_z))\n\n control_points_x = np.array([control_point[0] for control_point in self.control_net.values()])\n control_points_y = 
np.array([control_point[1] for control_point in self.control_net.values()])\n control_points_z = np.array([control_point[2] for control_point in self.control_net.values()])\n\n # x_min = min(np.min(surface_x), np.min(control_points_x))\n # x_max = max(np.max(surface_x), np.max(control_points_x))\n # x_range = np.abs(x_max - x_min)\n #\n # y_min = min(np.min(surface_y), np.min(control_points_y))\n # y_max = max(np.max(surface_y), np.max(control_points_y))\n # y_range = np.abs(y_max - y_min)\n #\n # z_min = min(np.min(surface_z), np.min(control_points_z))\n # z_max = max(np.max(surface_z), np.max(control_points_z))\n # z_range = np.abs(z_max - z_min)\n #\n # ax.set_xlim(x_min - 0.05 * x_range, x_max + 0.05 * x_range)\n # ax.set_ylim(y_min - 0.05 * y_range, y_max + 0.05 * y_range)\n # ax.set_zlim(z_min - 0.05 * z_range, z_max + 0.05 * z_range)\n\n ax.scatter(control_points_x, control_points_y, control_points_z, depthshade = False, **CONTROL_POLYGON_KWARGS)\n\n # print(np.max(surface_x), np.max(surface_y), np.max(surface_z))\n # print(np.min(surface_x), np.min(surface_y), np.min(surface_z))\n # print(surface_x)\n # print(surface_y)\n # print(surface_z)\n xyz = np.reshape(xyz, (-1, 3))\n print(xyz.shape)\n x, y, z = xyz[:, 0], xyz[:, 1], xyz[:, 2]\n ax.scatter(x, y, z)\n # ax.plot_trisurf(\n # x, y, z,\n # cmap = plt.get_cmap('viridis'),\n # linewidth = 0,\n # antialiased = True,\n # )\n # ax.plot_surface(surface_x, surface_y, surface_z, rstride = 1, cstride = 1)\n # ax.plot_trisurf(surface_x, surface_y, surface_z)\n # ax.plot_trisurf(surface_x, surface_y, surface_z, **CURVE_KWARGS)\n\n ax.axis('off')\n\n ax.view_init(elev = 45, azim = 0) # note that this resets ax.dist to 10, so we can't use it below\n ax.dist = 7.5 # default is 10, so zoom in a little because there's no axis to take up the rest of the space\n\n plt.show()\n utils.save_current_figure(**kwargs)\n\n ### ANIMATION ###\n\n frames = length * fps\n\n writer = anim.writers['ffmpeg'](fps = fps, bitrate = 2000) # don't need a very high bitrate\n\n def animate(frame):\n print(frame, frames, frame / frames)\n ax.azim = 360 * frame / frames # one full rotation\n return [] # must return the list of artists we modified (i.e., nothing, since all we did is rotate the view)\n\n ani = anim.FuncAnimation(fig, animate, frames = frames, blit = True)\n ani.save(f\"{os.path.join(kwargs['target_dir'], kwargs['name'])}.mp4\", writer = writer)\n\n plt.close()", "def plotSurface(X):\n from mpl_toolkits.mplot3d import Axes3D\n from mpl_toolkits.mplot3d import proj3d\n f=plt.figure()\n ax=f.add_subplot(111,projection='3d')\n xi=np.arange(10,14,0.05)\n yi=np.arange(12,16,0.05)\n z = matplotlib.mlab.griddata(X[:,0], X[:,1], X[:,2], xi, yi, interp='nn')\n x, y = np.meshgrid(xi, yi)\n ax.plot_surface(x, y, z)\n return f", "def plot3d(self):\n plot_rupture_wire3d(self)", "def plot_3D(Y_data, num_area):\n ref_shape = [Y_data.shape[0], Y_data.shape[1], Y_data.shape[2]]\n fig = plt.figure()\n ax = plt.axes(projection=\"3d\")\n axl = plt.gca()\n axl.set_xlim3d([0, ref_shape[0]])\n axl.set_ylim3d([0, ref_shape[1]])\n axl.set_zlim3d([0, ref_shape[2]])\n\n fig.set_facecolor('black')\n ax.set_facecolor('black')\n ax.grid(False)\n ax.w_xaxis.pane.fill = False\n ax.w_yaxis.pane.fill = False\n ax.w_zaxis.pane.fill = False\n\n ax.set_xlabel('Width', c='white')\n ax.set_ylabel('Depth', c='white')\n ax.set_zlabel('Height', c='white')\n\n for a in np.arange(1, num_area+1):\n loc = np.where(Y_data == a)\n ax.scatter3D(loc[0], loc[1], loc[2], marker=\".\", alpha=0.9)\n\n 
plt.show()", "def plot_3d(self, ax_3d: Axes3D, n_angles: int = 30, **kwargs) -> None:\n X, Y, Z = self.to_mesh(n_angles)\n\n ax_3d.plot_surface(X, Y, Z, **kwargs)", "def plotTerrain3d(self, gdf: gpd.GeoDataFrame, fig_size: tuple=(12, 10), size: float=0.01):\n fig, ax = plt.subplots(1, 1, figsize=fig_size)\n ax = plt.axes(projection='3d')\n ax.scatter(gdf.geometry.x, gdf.geometry.y, gdf.elevation, s=size)\n plt.show()", "def plot3d(data_x, data_y, data_z, vol):\n fig = go.Figure(\n data = [\n go.Mesh3d(\n x = data_x,\n y = data_y,\n z = data_z,\n i = [7, 0, 0, 0, 4, 4, 6, 6, 4, 0, 3, 2], # These are needed, numbers from documentation\n j = [3, 4, 1, 2, 5, 6, 5, 2, 0, 1, 6, 3],\n k = [0, 7, 2, 3, 6, 7, 1, 1, 5, 5, 7, 6],\n colorscale=[[0, 'darkblue'],\n [0.5, 'lightskyblue'],\n [1, 'darkblue']],\n intensity = np.linspace(0, 1, 8, endpoint=True),\n showscale=False,\n opacity = 0.6\n )\n ],\n layout = go.Layout(\n title = \"Le volume est: \" + str(vol),\n autosize = True\n )\n )\n\n # This prints it\n pyo.iplot(fig, filename='Determinant-Volume')", "def visualize_3d(grbdir,x, y, z, t, thetax, thetay, name):\n # Set ax.azim and ax.elev to ra, dec\n global runconf\n\n from mpl_toolkits.mplot3d import Axes3D\n fig = plt.figure()\n plt.suptitle(r\"Visualisation of {name} in 3d:$\\theta_x$={tx:0.1f},$\\theta_y$={ty:0.1f}\".format(name=name, tx=thetax, ty=thetay))\n # Z\n ax = plt.subplot(2, 2, 1, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = z.ra.deg\n ax.elev = z.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI pointing (z)\")\n\n # Transient\n ax = plt.subplot(2, 2, 2, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = t.ra.deg\n ax.elev = t.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from nominal \\n transient direction\")\n\n # X\n ax = plt.subplot(2, 2, 3, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = x.ra.deg\n ax.elev = x.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI X axis\")\n\n # Z\n ax = plt.subplot(2, 2, 4, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = y.ra.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI Y axis\")\n\n return", "def plot_surface(self, varname):\n\n if self.is_vr:\n self._plot_vr_surface(varname)\n else:\n self._plot_sr_surface(varname)", "def plot_slice(self,res):\n x = np.linspace(0,1,res)\n y = np.linspace(0,1,res)\n X,Y = np.meshgrid(x,y)\n plt.figure()\n ax = plt.axes(projection = '3d')\n ax.plot_surface(X,Y,abs(self.psi)[:,:,math.floor(res/2)])\n plt.show()", "def plot_bivariate_3d(X, Y, Z, bounds, title, **kwargs):\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.set_xticks(np.linspace(bounds[0],bounds[1],6))\n ax.set_yticks(np.linspace(bounds[0],bounds[1],6))\n ax.set_xlim(bounds)\n ax.set_ylim(bounds)\n ax.plot_surface(X,Y,Z, **kwargs)\n plt.title(title)\n plt.show()", "def surf_plot(x, y, z, filename, title = None, xlabel = None, ylabel = None, zlabel = None, elev = 0, azim = 0, **surf_kwargs):\n # Checking that the x- and y- and z- inputs are equal in length \n if len(x) != len(y) != len(z):\n raise LengthError()\n\n fig = plt.figure() # Creates blank figure\n ax = fig.gca(projection='3d') # Creating 
3-dimensional axes\n fig.set_size_inches(18, 10) # Sets figure size\n\n # Plotting the surface - specifying the colormap, and setting the surface to opaque (with antialiased = False)\n ax.plot_trisurf(x, y, z, cmap = cm.coolwarm, linewidth=0, antialiased=False, **surf_kwargs) \n\n # Setting plot parameters\n ax.set_title(title, fontsize = 24, pad = 15)\n ax.set_xlabel(xlabel, fontsize=18, labelpad = 15)\n ax.set_ylabel(ylabel, fontsize=18, labelpad = 15)\n ax.set_zlabel(zlabel, fontsize=18, labelpad = 15)\n ax.tick_params(axis='both', which='major', pad=10)\n ax.set_zlim(0, 1.0) # z-axis limits set to [0,1] as the z-axis refers to probability in our case.\n\n ax.view_init(elev=elev, azim=azim) # Sets 'camera angle' of surface plot, for saving\n # f-string allows save filepath to be set inside the plt.savefig() function\n plt.savefig(f'{os.path.join(plot_path,filename)}.pdf', dpi = 200) # Saving the plot in the 'plots' folder", "def plot_3d_object(object_):\n \n # Initialize renderer instance\n r = Renderer()\n\n # Add surfaces and goal regions to the renderer instance\n for surf in object_:\n r.add((object_[surf][0],'b',1))\n if len(object_[surf])>2:\n r.add((object_[surf][2],'r',1))\n r.add((gPoint(-15,-15,-15),'k',1))\n r.show()", "def DisplayMesh():\r\n \r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, Vertices, Triangles = CreateMatrixVTK(VTKString)\r\n \r\n fig = plt.figure()\r\n ax1 = fig.add_subplot(111,projection = '3d')\r\n ax1.plot_trisurf(Vertices[:,0],Vertices[:,1],Vertices[:,2],triangles= Triangles[:,1:])\r\n ax1.set_zlabel('z')\r\n ax1.set_ylabel('y')\r\n ax1.set_xlabel('x')\r\n plt.show()", "def plot_3d(x_data, y_data, Z, df, xlabel, ylabel, xrange=None,\n yrange=None, figsize=(12, 12)):\n fig = pyplot.figure(figsize=figsize)\n ax = fig.add_subplot(111, projection='3d')\n nsamp, nsen = Z.shape\n\n sen_index = df.columns.names.index('sensor')\n senlist = df.columns.levels[sen_index]\n pyplot.yticks(y_data, senlist)\n ax.plot_surface(\n np.repeat(x_data,\n nsen, axis=1),\n np.repeat(np.matrix(y_data), nsamp, axis=0),\n df.values,\n cmap=cm.coolwarm)\n pyplot.xlabel(xlabel)\n pyplot.ylabel('Sensor name')\n ax.set_zlabel(ylabel)\n ax.view_init(elev=45., azim=-130)\n ax.tick_params(axis='y', which='major', labelsize=4)\n pyplot.show()", "def sample_and_plot(self):\n fig = plt.figure()\n ax = plt.axes(projection = '3d')\n ax.plot_surface(self.X, self.Y, self.sample(), cmap = plt.cm.jet, rstride = 2, cstride = 2, linewidth = 1)\n plt.show()", "def render_3d(projection, **kwds):\n if isinstance(projection, Polyhedron): projection = Projection(projection)\n return \\\n projection.render_vertices_3d(width=3, color='green', **kwds) +\\\n projection.render_wireframe_3d(width=3, color='green', **kwds) + \\\n projection.render_solid_3d(**kwds)", "def test_3d_plot(self):\n db = pd.HDFStore('test.h5')\n df_iv = db['iv']\n db.close()\n\n date = pd.to_datetime('2015-04-01')\n self.full_iv.get_data()\n df_date0 = self.full_iv.df_all.query('date == %r' % date)\n df_date1 = df_iv.query('date == %r' % date)\n df_date = pd.concat([df_date0, df_date1])\n \"\"\":type: pd.DataFrame\"\"\"\n\n x = df_date['dte']\n y = df_date['strike']\n z = df_date['impl_vol']\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n # noinspection PyUnresolvedReferences\n ax.plot_trisurf(x, y, z, cmap=cm.jet, linewidth=0.2)\n # ax.plot_wireframe(x, y, z, 
rstride=1, cstride=1)\n plt.show()", "def plot_3d(x, y):\n # Create grid coordinates\n x_axis = np.linspace(-10, 10, 50)\n y_axis = np.linspace(-1, 4, 50)\n xx, yy = np.meshgrid(x_axis, y_axis, indexing='xy')\n z = np.zeros((x_axis.size, y_axis.size))\n\n # Calculate z-values based on grid coefficients\n for (i, j), v in np.ndenumerate(z):\n z[i, j] = compute_cost(x, y, theta=[[xx[i, j]], [yy[i, j]]])\n\n # Construct plot\n fig = plt.figure(figsize=(12, 10))\n ax = fig.add_subplot(111, projection='3d')\n ax.plot_surface(xx, yy, z, rstride=1, cstride=1, alpha=0.6, cmap=plt.cm.jet)\n ax.set_zlabel('Cost')\n ax.set_zlim(z.min(), z.max())\n ax.view_init(elev=15, azim=230)\n plt.title('X vs. Y vs. Cost')\n ax.set_xlabel(r'$\\theta_0$', fontsize=17)\n ax.set_ylabel(r'$\\theta_1$', fontsize=17)\n plt.show()\n plt.close()", "def plot_cube(ax: Axes, x: ArrayLike, y: ArrayLike, f_low: callable, f_upp: callable, **kwargs):\n # lower\n xm, ym = np.meshgrid(x, y)\n zm = f_low(xm, ym)\n ax.plot_surface(xm, ym, zm, **kwargs)\n\n # upper\n zm = f_upp(xm, ym)\n ax.plot_surface(xm, ym, zm, **kwargs)\n\n # north\n xm, ym = np.array([x, x]), y[0]*np.ones([2, len(y)])\n zm = np.array([f_low(x, y[0]), f_upp(x, y[0])])\n ax.plot_surface(xm, ym, zm, **kwargs)\n\n # south\n xm, ym = np.array([x, x]), y[-1]*np.ones([2, len(y)])\n zm = np.array([f_low(x, y[-1]), f_upp(x, y[-1])])\n ax.plot_surface(xm, ym, zm, **kwargs)\n\n # east\n xm, ym = x[0]*np.ones([2, len(x)]), np.array([y, y])\n zm = np.array([f_low(x[0], y), f_upp(x[0], y)])\n ax.plot_surface(xm, ym, zm, **kwargs)\n\n # west\n xm, ym = x[-1]*np.ones([2, len(x)]), np.array([y, y])\n zm = np.array([f_low(x[-1], y), f_upp(x[-1], y)])\n ax.plot_surface(xm, ym, zm, **kwargs)", "def plot_results_3d(p_x, p_y, p_z, h_exp = 0.5):\n plt.figure(figsize = (10, 10))\n ax3d = plt.axes(projection = '3d') \n\n color=iter(cm.rainbow(np.linspace(0,1,p_x.shape[0]))) # (1)\n labels = ['Particle ' + str(pl+1) for pl in np.arange(0, p_x.shape[0], step = 1)]\n \n for p in np.arange(0, p_x.shape[0], step = 1): \n c = next(color) # (1)\n for t in np.arange(0, p_x.shape[1], step = 1): \n ax3d.plot3D(p_x[p, t], p_y[p, t], p_z[p, t], 'x', c = c, label = labels[p]) \n legend_without_duplicate_labels(ax3d)\n ax3d.set_xlabel('X (pixels)') \n ax3d.set_ylabel('Y (pixels') \n ax3d.set_zlabel('Z (pixels)') \n ax3d.set_xlim([origin-150,origin+150])\n ax3d.set_ylim([origin-150,origin+150])\n ax3d.set_zlim([origin-150,origin+150])\n ax3d.set_title('3D particle trajectories - H = ' + str(h_exp))", "def __call__(\n self, *, plot=None, color=plot_util.cp_int[0], multiplier=None, **kwargs\n ):\n if self.region.ndim != 3:\n raise RuntimeError(\"Only 3-dimensional regions can be plotted.\")\n\n if plot is None:\n plot = k3d.plot()\n plot.display()\n\n multiplier = self._setup_multiplier(multiplier)\n\n plot_array = np.ones((1, 1, 1)).astype(np.uint8) # avoid k3d warning\n\n rescaled_region = self.region.scale(1 / multiplier)\n bounds = [\n i\n for sublist in zip(rescaled_region.pmin, rescaled_region.pmax)\n for i in sublist\n ]\n\n plot += k3d.voxels(\n plot_array, color_map=color, bounds=bounds, outlines=False, **kwargs\n )\n\n self._axis_labels(plot, multiplier)", "def plot3d(self,datarange=None,nx=100,ny=100,clf=True,cb=True,data='auto',**kwargs):\n from enthought.mayavi import mlab as M\n from operator import isMappingType\n\n if data == 'auto':\n if self.data:\n data = self.data[:2]\n else:\n data = None\n\n if data: #TODO:correct coord conv\n xd,yd = data[0][0],data[0][1]\n if datarange is 
None:\n datarange = (np.min(xd),np.max(xd),np.min(yd),np.max(yd))\n maxmind = (np.max(data[1]),np.min(data[1]))\n elif datarange is None:\n if self.rangehint is not None:\n datarange = self.rangehint\n else:\n raise ValueError(\"Can't choose limits for plotting without data or a range hint\")\n maxmind = None\n\n grid = np.mgrid[datarange[0]:datarange[1]:1j*nx,datarange[2]:datarange[3]:1j*ny]\n res = self(grid)\n\n# if maxmind:\n# norm = plt.normalize(min(np.min(res),maxmind[1]),max(np.max(res),maxmind[0]))\n# else:\n# norm = plt.normalize(np.min(res),np.max(res))\n\n if clf:\n M.clf()\n\n M.mesh(grid[0],grid[1],res)\n\n if cb:\n if isMappingType(cb):\n M.colorbar(**cb)\n else:\n M.colorbar()\n\n if data:\n if isMappingType(data):\n kwscat = dict(data)\n else:\n kwscat = {}\n zd = data[1]\n zres = zd-self((xd,yd))\n kwscat.setdefault('scale_mode','none')\n kwscat.setdefault('scale_factor','auto')\n g = M.points3d(xd,yd,zd,zres,**kwscat)\n if kwscat['scale_factor'] == 'auto':\n g.glyph.glyph.scale_factor /= 2\n\n #M.xlim(datarange[0],datarange[1])\n #M.ylim(datarange[2],datarange[3])", "def draw_f():\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n x_matrix = np.arange(-10, 11, 0.1)\n y_matrix = np.arange(-10, 11, 0.1)\n x_matrix, y_matrix = np.meshgrid(x_matrix, y_matrix)\n # print(x_matrix)\n u_matrix = x_matrix.copy()\n for i in range(x_matrix.shape[0]):\n for j in range(x_matrix.shape[0]):\n u_matrix[i][j] = f(x_matrix[i][j], y_matrix[i][j])\n surf = ax.plot_surface(x_matrix, y_matrix, u_matrix)\n\n plt.show()\n return surf", "def drawSphere3D(x0,y0,z0, radius, hres, vres):\n dislin.sphe3d(x0,y0,z0, radius, hres, vres)", "def cloudy_grid_surface(**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n cloudy_library = clo.library()\n model_number_matrix,grid_table = cloudy_library._restore_grid_table(grid_ext=p.grid_ext)\n\n fig = plt.figure(figsize=(10,7))\n ax = plt.axes(projection='3d')\n\n key1, key2 = list(p.cloudy_param.keys())[0],list(p.cloudy_param.keys())[1]\n value1, value2 = list(p.cloudy_param.values())[0],list(p.cloudy_param.values())[1]\n\n # Decide on what goes on x and y axis\n cloudy_parameters = np.array(['NH','FUV','hden','Z'])\n x_index = cloudy_parameters[(cloudy_parameters != key1) &\\\n (cloudy_parameters != key2)][0]\n y_index = cloudy_parameters[(cloudy_parameters != key1) &\\\n (cloudy_parameters != key2)][1]\n\n # Cut in grid table\n grid_table_cut = grid_table.iloc[np.where((grid_table[key1].values == value1) & \\\n (grid_table[key2].values == value2))[0]]\n x, y = grid_table_cut[x_index].values, grid_table_cut[y_index].values\n X, Y = np.meshgrid(np.unique(grid_table_cut[x_index].values), np.unique(grid_table_cut[y_index].values))\n\n # Plot line ratio?\n if '_' in p.line:\n L1 = grid_table_cut[p.line.split('_')[0]].values\n L2 = grid_table_cut[p.line.split('_')[1]].values\n L2[L2 == 0] = 1e9\n line_lum = (L1/L2).astype(float)\n vmin = np.min(np.log10(line_lum[L2 < 1e9]))\n\n else:\n line_lum = grid_table_cut[p.line].values.astype(float)\n vmin = np.min(np.log10(line_lum[line_lum > 0]))\n\n lum = np.log10(line_lum)\n lum = lum.reshape([len(np.unique(x)), len(np.unique(y))]).T\n\n # ########## Patching the grid !!\n # line_lum[np.isnan(line_lum)] = -1 # what are these?\n # # 0 values: not sure if we have any?\n # line_lum[line_lum == 0] = np.min(line_lum[line_lum > 0])\n # # Negative numbers: missing grid point\n # i_missing = np.where(line_lum < 0)[0]\n # while len(i_missing) > 0:\n # lum = np.log10(line_lum)\n # 
for i in i_missing:\n # # print(lum[i-1],lum[i+1])\n # try: \n # lum[i] = (lum[i-1] + lum[i+1])/ 2\n # except:\n # pass\n # # print('he',np.isnan(lum[i]))\n # if np.isnan(lum[i]):\n # try:\n # lum[i] = lum[i-1] \n # except:\n # pass\n # if np.isnan(lum[i]):\n # try:\n # lum[i] = lum[i+1] \n # except:\n # pass \n # line_lum[i] = 10.**lum[i]\n # # print(i,lum[i])\n # i_missing = np.where(line_lum < 0)[0]\n # ########## End of patching\n\n\n # pdb.set_trace()\n ax.plot_surface(X, Y, lum, cmap=\"autumn_r\", vmin=vmin, lw=0, rstride=1, cstride=1,alpha=0.8)\n\n ax.set_xlabel('\\n\\n' + getlabel('l'+x_index))\n ax.set_ylabel('\\n\\n' + getlabel('l'+y_index))\n\n try:\n ax.set_zlabel('\\n\\n' + getlabel('l%s' % p.line))\n except:\n ax.set_zlabel('\\n\\n log ' + p.line.replace('_','/'))\n\n\n ax.scatter(x[line_lum > 10.**vmin],y[line_lum > 10.**vmin],np.log10(line_lum[line_lum > 10.**vmin]),\\\n 'o',c=np.log10(line_lum[line_lum > 10.**vmin]),cmap='autumn_r',s=50)\n\n # print(x)\n # print(line_lum)\n ax.view_init(30, p.angle)\n\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'look-up/'): os.mkdir(p.d_plot + 'look-up/') \n plt.savefig(p.d_plot + 'look-up/cloudy_grid_%s.%s' % (p.line, p.format), format=p.format, dpi=300) \n # pdb.set_trace()" ]
[ "0.6976554", "0.68625677", "0.6843619", "0.6560246", "0.6512885", "0.6510929", "0.6456214", "0.63602376", "0.63443196", "0.63097036", "0.6258106", "0.62344956", "0.61818576", "0.61670333", "0.61499923", "0.6146061", "0.6144096", "0.61362255", "0.60683507", "0.60314703", "0.6022591", "0.5997148", "0.59435076", "0.5932967", "0.59168744", "0.58705467", "0.5867112", "0.584799", "0.5838817", "0.583538" ]
0.7583633
0
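The positive document of the record above follows the scikit-image marching-cubes recipe. Below is a hedged, runnable reduction of the same workflow on a synthetic sphere rather than labelled regions; it assumes scikit-image >= 0.19, where measure.marching_cubes supersedes the deprecated marching_cubes_lewiner call used in the record.

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from skimage import measure

# Synthetic binary volume: a sphere of radius 12 in a 32^3 grid.
x, y, z = np.mgrid[-16:16, -16:16, -16:16]
volume = ((x**2 + y**2 + z**2) < 12**2).astype(float)

# Extract the isosurface and report its area, as in the record's document.
verts, faces, _, _ = measure.marching_cubes(volume, level=0.5)
print("Surface area: {:0.2f}".format(measure.mesh_surface_area(verts, faces)))

fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(111, projection="3d")
mesh = Poly3DCollection(verts[faces])
mesh.set_edgecolor("black")
ax.add_collection3d(mesh)
ax.set_xlim(0, volume.shape[0])
ax.set_ylim(0, volume.shape[1])
ax.set_zlim(0, volume.shape[2])
plt.tight_layout()
plt.show()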
Connect current container to the environment containers network.
def connect_to_containers_network(): logging.info("Connecting to the environment network") container_id = get_current_container_id() subprocess.check_output( 'docker network connect subsystem_tests-network {container_id}'.format(container_id=container_id), shell=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connect(self, container_name: str, aliases: list[str] = None,\n ipv4: str | None = None) -> None:\n self.log.debug(\n f\"Connecting {container_name} to network '{self.network_name}'\")\n self.network.connect(\n container_name, aliases=aliases, ipv4_address=ipv4\n )", "def connect_dc_network(self, dc_network):\n self.manage.net = dc_network\n self.compute.nets[self.manage.floating_network.id] = self.manage.floating_network\n logging.info(\"Connected DCNetwork to API endpoint %s(%s:%d)\" % (\n self.__class__.__name__, self.ip, self.port))", "def _connect_docker_client(self):\n # lets check if Docker ENV information is set and use local socket as fallback\n if os.environ.get(\"DOCKER_HOST\") is None:\n os.environ[\"DOCKER_HOST\"] = \"unix://var/run/docker.sock\"\n LOG.warning(\"ENV variable 'DOCKER_HOST' not set. Using %r as fallback.\" % os.environ[\"DOCKER_HOST\"])\n\n # lets connect to the Docker instance specified in current ENV\n # cf.: http://docker-py.readthedocs.io/en/stable/machine/\n dc = docker.from_env(assert_hostname=False)\n # do a call to ensure that we are connected\n dc.info()\n LOG.info(\"Connected to Docker host: %r\" % dc.base_url)\n return dc", "def connect(self) -> None:\n self.s.connect((self.ip, self.port))", "def network_containers_routerless(self, containers):\n client = docker.from_env(timeout=60)\n network_name = f'{self.untrusted_user}_routerless_network'\n\n # Assumes untrustedXX naming scheme, where XX is a number\n untrusted_num = int(self.untrusted_user.replace('untrusted', '')) + 100\n subnet = 1\n ip_address_start = f'10.{untrusted_num}.{subnet}'\n ipam_pool = docker.types.IPAMPool(subnet=f'{ip_address_start}.0/24')\n ipam_config = docker.types.IPAMConfig(pool_configs=[ipam_pool])\n # Create the global network\n # TODO: Can fail on ip conflict.\n network = client.networks.create(\n network_name,\n driver='bridge',\n ipam=ipam_config,\n internal=True\n )\n client.close()\n\n host = 2\n for container in containers:\n ip_address = f'{ip_address_start}.{host}'\n network.connect(\n container.container,\n ipv4_address=ip_address,\n aliases=[container.name, ]\n )\n container.set_ip_address(network_name, ip_address)\n host += 1\n self.networks.append(network)", "def network_containers_with_router(self, containers):\n client = docker.from_env(timeout=60)\n router = self.get_container_with_name('router', containers)\n router_connections = {}\n network_num = 10\n subnet = 1\n # Assumes untrustedXX naming scheme, where XX is a number\n untrusted_num = int(self.untrusted_user.replace('untrusted', '')) + 100\n container_to_subnet = {}\n\n for container in containers:\n network_name = f\"{container.full_name}_network\"\n if container.name == 'router':\n continue\n # We are creating a new subnet with a new subnet number\n subnet += 1\n # We maintain a map of container_name to subnet for use by the router.\n container_to_subnet[container.name] = subnet\n actual_name = '{0}_Actual'.format(container.name)\n\n # Create the network with the appropriate iprange\n ipam_pool = docker.types.IPAMPool(subnet=f'{network_num}.{untrusted_num}.{subnet}.0/24')\n ipam_config = docker.types.IPAMConfig(pool_configs=[ipam_pool])\n network = client.networks.create(\n network_name,\n ipam=ipam_config,\n driver='bridge',\n internal=True\n )\n\n # We connect the container with host=2. 
Later we'll connect the router with host=3\n container_ip = f'{network_num}.{untrusted_num}.{subnet}.2'\n container.set_ip_address(network_name, container_ip)\n\n network.connect(container.container, ipv4_address=container_ip, aliases=[actual_name, ])\n self.networks.append(network)\n\n # The router pretends to be all dockers on this network.\n if len(container.outgoing_connections) == 0:\n connected_machines = [x.name for x in containers]\n else:\n connected_machines = container.outgoing_connections\n\n for connected_machine in connected_machines:\n if connected_machine == 'router':\n continue\n if connected_machine == container.name:\n continue\n\n if container.name not in router_connections:\n router_connections[container.name] = []\n\n if connected_machine not in router_connections:\n router_connections[connected_machine] = []\n\n # The router must be in both endpoints' network, and must connect to\n # all endpoints on a network simultaneously, so we group together\n # all connections here, and then connect later.\n router_connections[container.name].append(connected_machine)\n router_connections[connected_machine].append(container.name)\n # Connect the router to all networks.\n for startpoint, endpoints in router_connections.items():\n full_startpoint_name = f'{self.untrusted_user}_{startpoint}'\n network_name = f\"{full_startpoint_name}_network\"\n # Store the ip address of the router on this network\n router_ip = f'{network_num}.{untrusted_num}.{container_to_subnet[startpoint]}.3'\n router.set_ip_address(network_name, router_ip)\n\n aliases = []\n for endpoint in endpoints:\n if endpoint in aliases:\n continue\n aliases.append(endpoint)\n network = self.get_network_with_name(network_name)\n network.connect(router.container, ipv4_address=router_ip, aliases=aliases)\n client.close()", "def create_docker_network(enode, category, config):\n # Let's find out which networks exist already\n dockernets = enode._client.networks()\n\n # Let's figure out what docker network we should connect to\n netname = config.get(\n 'connect_to',\n '{}_{}'.format(enode._container_name, category)\n )\n\n # Create docker network if it doesn't exist already\n if not any(d['Name'] == netname for d in dockernets):\n enode._client.create_network(\n name=netname,\n driver='bridge'\n )\n\n # Disconnect from 'none' to be able to connect to other\n # networks (https://github.com/docker/docker/issues/21132)\n networks = enode._client.inspect_container(\n enode.container_id\n )['NetworkSettings']['Networks']\n if 'none' in networks:\n enode._client.disconnect_container_from_network(\n container=enode._container_id,\n net_id='none'\n )\n\n # Connect container to the docker network\n enode._client.connect_container_to_network(\n container=enode._container_id,\n net_id=netname\n )\n\n # Check if this category has a defined netns\n netns = config.get('netns', None)\n if netns is None:\n return\n\n # Create this network's namespace inside the container\n # https://imgflip.com/i/16621d\n enode._docker_exec('ip netns add {}'.format(netns))\n\n netns_exec = 'ip netns exec {}'.format(netns)\n\n # lo should always be up\n enode._docker_exec('{} ip link set dev lo up'.format(netns_exec))\n\n # Find out the name Docker gave to this interface name\n iface = get_iface_name(enode, netname)\n\n # Prefix interface\n prefixed_iface = '{}{}'.format(config['prefix'], iface)\n\n # Move this network's interface to its netns\n enode._docker_exec(\n 'ip link set dev {iface} netns {netns} name {prefixed_iface}'.format(\n **locals()\n )\n 
)\n\n # Reset the interface to original config\n # This is required because after moving the iface from netns it lost its\n # ip and other config.\n enode._docker_exec(\n '{netns_exec} '\n 'ip address add {docker_netconf[IPAddress]}/'\n '{docker_netconf[IPPrefixLen]} '\n 'dev {prefixed_iface}'.format(\n **locals()\n )\n )\n enode._docker_exec(\n '{netns_exec} ip link set dev {prefixed_iface} up'.format(\n **locals()\n )\n )", "def connect_env(self,environment,agentIndex,allAgents):\n self.environment = environment\n self.agentIndex = agentIndex\n self.allAgents = allAgents\n environment.connect_server(agentIndex)", "def learn_yourself(self):\n try:\n file = open(\"/proc/self/cgroup\")\n self.id = [l for l in file.read().split(\"\\n\") if l.find(\"cpu\") != -1][0].split(\"/\")[-1]\n if len(self.id) > 64:\n slice = [x for x in re.split('[^a-fA-F0-9]', self.id) if len(x) is 64]\n if len(slice) is 1:\n self.id = slice[0]\n else:\n print(\"[ERROR] Couldn't parse container id from value :\", self.id, file=sys.stderr)\n raise Exception()\n self.container = self.client.containers.get(self.id)\n self.networks = [a for a in self.container.attrs[\"NetworkSettings\"][\"Networks\"].keys()]\n self.networks = {self.client.networks.get(a).id: a for a in self.networks}\n file.close()\n except Exception as e:\n print(\"[ERROR]Couldn't determine container ID of this container:\", e.args,\n \"\\n Is it running in docker environment?\",\n file=sys.stderr)\n print(\"Falling back to default network\", file=sys.stderr)\n network = self.client.networks.get(\"frontend\")\n self.networks[network.id] = \"frontend\"", "def connect(self,host, container):\n logging.debug(\"\")\n logging.debug(\"************************************************************\")\n attempts = 3\n count = 0\n while attempts:\n attempts -= 1\n count +=1\n try:\n if attempts > 0:\n print \"Attempting Connection to %s (%i/%i)\" % (host, count, attempts)\n logging.debug(\"\\t connecting to %s@%s\" % (args.user, host))\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(\n host,\n username=args.user,\n port=22,\n allow_agent=True,\n look_for_keys=True,\n timeout=5\n )\n logging.debug(\"Connected to %s\" % (host))\n chan = ssh.invoke_shell()\n # print(repr(ssh.get_transport()))\n if not container:\n logging.debug(\"*** Initiating Interactive Session\")\n interactive().rshell(chan)\n logging.debug(\"Closing SSH session to %s\" % (host))\n chan.close()\n interactive().disconnect()\n break\n else:\n print \"Max Connection attempts reached (%i/%i)\" % (count, attempts)\n logging.debug(\"Exiting with code 3\")\n sys.exit(3)\n except paramiko.AuthenticationException:\n print \"Authentication failed when connecting to %s\" % (host)\n sys.exit(1)\n except:\n print \"Connection (%i/%i) failed to %s, waiting 5s retry\" % (count, attempts, host)\n time.sleep(5)", "def setupInfraNetwork(\n networkName: str, imageName: str, ctx: ExecContext) -> None:\n try:\n args = [\"--detach\"]\n if ctx.uidmaps:\n args.extend(ctx.getUidMaps())\n if ctx.dns:\n args.append(f\"--dns={ctx.dns}\")\n args.extend(ctx.getHosts())\n\n executePod(\"net-\" + networkName, args, imageName, [\"sleep\", \"Inf\"])\n except AlreadyRunning:\n pass", "def setup_bridge_network(self, iface):\n out = utils.run_script('conjure-up.lxc network show conjureup1')\n if out.returncode == 0:\n return # already configured\n\n out = utils.run_script('conjure-up.lxc network create conjureup1 '\n 'ipv4.address=auto '\n 'ipv4.nat=true '\n 'ipv6.address=none 
'\n 'ipv6.nat=false')\n if out.returncode != 0:\n raise Exception(\"Failed to create LXD conjureup1 network bridge: \"\n \"{}\".format(out.stderr.decode()))", "def connect(self):\n self.net.active(True)\n self.net.config(essid=self.ssid, password=self.pwd, channel=3)\n\n while not self.net.active():\n pass\n\n self.net.ifconfig((\"192.168.4.5\", \"255.255.255.0\", \"192.168.4.1\", \"208.67.222.222\"))", "def __init__(self, network, container):\r\n super(EnvironmentEndpoint, self).__init__(network)\r\n\r\n self._container = container", "def connect(self):\n self.conn.connect()", "def connect():", "def network_containers(self, containers):\n if len(containers) <= 1:\n return\n client = docker.from_env(timeout=60)\n none_network = client.networks.get('none')\n client.close()\n\n # Remove all containers from the none network\n for container in containers:\n none_network.disconnect(container.container, force=True)\n\n if self.get_router(containers) is not None:\n self.network_containers_with_router(containers)\n else:\n self.network_containers_routerless(containers)\n\n # Provide an initialization file to each container.\n self.create_knownhosts_txt(containers)\n self.create_knownhosts_json(containers)", "def lab_network(self) -> None:\n self.host = getattr(self, \"host\")\n try:\n getattr(self.host, \"uboot_network_setup\")(self)\n except AttributeError:\n raise Exception(\n f\"The lab-host {self.host!r} does not seem to support uboot network setup!\"\n )", "def _create_docker_container(self):\n cwd = os.getcwd()\n\n # get a docker client\n docker_client = docker.from_env()\n docker_image = \"aca_build0:latest\"\n mount_pnt = docker.types.Mount(\"/mnt/alcor-control-agent\",\n f'''{cwd}/../..''',\n type='bind')\n\n mount_modules = docker.types.Mount(\"/lib/modules\",\n \"/lib/modules\",\n type='bind')\n\n # Create the container in privileged mode\n container = docker_client.containers.create(\n docker_image, '/bin/bash', tty=True,\n stdin_open=True, auto_remove=False, mounts=[mount_pnt, mount_modules],\n privileged=True, cap_add=[\"SYS_PTRACE\"],\n ports={str(aca_droplet.port_internal) + \"/tcp\": ('0.0.0.0', aca_droplet.port_external)},\n security_opt=[\"seccomp=unconfined\"], name=self.id)\n container.start()\n container.reload()\n\n # Increment the static external port number counter\n aca_droplet.port_external = aca_droplet.port_external + 1\n\n # Restart dependancy services\n container.exec_run(\"/etc/init.d/rpcbind restart\")\n container.exec_run(\"/etc/init.d/rsyslog restart\")\n container.exec_run(\"ip link set dev eth0 up mtu 9000\")\n\n # We may need to restart ovs\n # container.exec_run(\"/etc/init.d/openvswitch-switch restart\")\n\n # Create simlinks\n container.exec_run(\"ln -s /mnt/alcor-control-agent/mizar/build/bin /trn_bin\")\n container.exec_run(\"ln -s /mnt/alcor-control-agent/mizar/build/xdp /trn_xdp\")\n container.exec_run(\"ln -s /sys/fs/bpf /bpffs\")\n\n container.exec_run(\n \"ln -s /mnt/alcor-control-agent/build/ /aca_build\")\n\n # Run the transitd in the background\n container.exec_run(\"/trn_bin/transitd \",\n detach=True)\n\n # Enable debug and tracing for the kernel\n container.exec_run(\n \"mount -t debugfs debugfs /sys/kernel/debug\")\n container.exec_run(\n \"echo 1 > /sys/kernel/debug/tracing/tracing_on\")\n\n # Enable core dumps (just in case!!)\n container.exec_run(\"ulimit -u\")\n cmd = \"echo '/mnt/alcor-control-agent/mizar/core/core_{}_%e.%p' |\\\n tee /proc/sys/kernel/core_pattern \".format(self.ip)\n container.exec_run(cmd)\n\n self.container = 
container\n self.ip = self.container.attrs['NetworkSettings']['IPAddress']\n self.mac = self.container.attrs['NetworkSettings']['MacAddress']", "def setup_net(self):\n pass", "def connect(self) -> ContextManager[Connection]:", "def connect(self):\n\t\tself._entity_server_connection.attempt_connection()", "def connect_to_master():", "def start(self) -> None:\r\n # --ulimit nofile=<soft limit>:<hard limit> set the limit for open files\r\n docker_run_command = ('docker run --ulimit nofile=65535:65535 -td -p %d:8545 -p %d:30303 --rm --name %s %s' %\r\n (self.rpc_port, self.ethereum_network_port, self.name, IMAGE))\r\n sleep(0.6)\r\n result = self.ip.exec_command(docker_run_command)\r\n if result:\r\n if result.startswith('docker: Error'):\r\n print(result)\r\n print(self.ip)\r\n raise RuntimeError('An error occurs while starting docker container. Container maybe already exists')\r\n print('container of node %s of blockchain %s at %s:%s started' % (self.node_index, self.blockchain_id,\r\n self.ip.address, self.rpc_port))\r\n new_account_command = 'docker exec -t %s geth --datadir abc account new --password passfile' % self.name\r\n sleep(0.1)\r\n account = self.ip.exec_command(new_account_command).split()[-1][1:-1]\r\n sleep(0.3)\r\n if len(account) == 40: # check if the account is valid\r\n self.accounts.append(account)\r\n else:\r\n print('invalid account')", "def initialize(self):\n LOGGER.info('Set %d initializing...', self.port_set)\n # There is a race condition here with ovs assigning ports, so wait a bit.\n time.sleep(2)\n shutil.rmtree(self.tmpdir, ignore_errors=True)\n networking_name = 'gw%02d' % self.port_set\n networking_port = self.pri_base + self.NETWORKING_OFFSET\n LOGGER.debug(\"Adding networking host on port %d\", networking_port)\n cls = docker_host.make_docker_host('daq/networking', prefix='daq', network='bridge')\n try:\n self.networking = self.runner.add_host(networking_name, port=networking_port,\n cls=cls, tmpdir=self.tmpdir)\n self._create_config(self.networking.tmpdir)\n self.record_result('startup')\n except Exception as e:\n self._state_transition(_STATE.ERROR)\n self.record_result('startup', exception=e)", "def enter_as_network_client(ai_settings,grid, screen, buttons,screen_status, button_status, card_database_filter, user,action, player2):\n host = user.ip_address\n port = 5555\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n s.connect((host,port))\n while 1:\n pass\n\n s.close()", "def docker(self, obj):\n\n if self._dockerclient is not None:\n return self._dockerclient\n host = self.properties[self.HOST_NODE]\n host_ip = self.get_host_ip(self, obj, host)\n url = 'tcp://' + host_ip + ':2375'\n self._dockerclient = docker.Client(base_url=url)", "def do_network_attach(cs, args):\n opts = {}\n opts['container'] = args.container\n opts['network'] = args.network\n opts['port'] = args.port\n opts['fixed_ip'] = args.fixed_ip\n opts = zun_utils.remove_null_parms(**opts)\n try:\n cs.containers.network_attach(**opts)\n print(\"Request to attach network to container %s \"\n \"has been accepted.\" % args.container)\n except Exception as e:\n print(\"Attach network to container %(container)s \"\n \"failed: %(e)s\" % {'container': args.container, 'e': e})", "def connect(ctx, config):\n log.info('Opening connections...')\n remotes = []\n machs = []\n for name in ctx.config['targets'].iterkeys():\n machs.append(name)\n for t, key in ctx.config['targets'].iteritems():\n t = misc.canonicalize_hostname(t)\n log.debug('connecting to %s', t)\n try:\n if 
ctx.config['sshkeys'] == 'ignore':\n key = None\n except (AttributeError, KeyError):\n pass\n remotes.append(\n remote.Remote(name=t, host_key=key, keep_alive=True, console=None))\n ctx.cluster = cluster.Cluster()\n\n remotes2 = []\n remotes3 = []\n found = 1\n for host in ctx.config['targets'].iterkeys():\n\tremotes2.append(host)\n remotes3 = sorted_nicely (remotes2)\n if 'roles' in ctx.config:\n for rem, roles in zip(remotes3, ctx.config['roles']):\n assert all(isinstance(role, str) for role in roles), \\\n \"Roles in config must be strings: %r\" % roles\n \t for objs in remotes:\n\t\tif rem == objs.name:\n \t ctx.cluster.add(objs, roles)\n found = 0\n\t\t break;\n\t if found == 1:\n\t\tlog.error('role matching error %s' % rem)\n log.info('roles: %s - %s' % (rem, roles))\n else:\n for rem in remotes:\n ctx.cluster.add(rem, rem.name)", "def _connect(self):\n if self.cluster.get('encrypted_password'):\n self.cluster['password'] = aws_etl.utils.decrypt(\n self.cluster['encrypted_password'])\n\n self.connection = connect(\n host=self.cluster['host'],\n port=self.cluster['port'],\n sslmode='require',\n user=self.cluster['user'],\n password=self.cluster['password'],\n database=self.cluster['database'])\n return self.connection" ]
[ "0.68715274", "0.6033247", "0.5958551", "0.5952797", "0.5917014", "0.5764563", "0.57549083", "0.57498366", "0.57448465", "0.573062", "0.57062066", "0.5695735", "0.5688441", "0.5684623", "0.56554174", "0.56166863", "0.56042194", "0.5562665", "0.5559789", "0.5551756", "0.55287415", "0.5521143", "0.5485183", "0.5467964", "0.5466669", "0.5451101", "0.5446656", "0.5444427", "0.54249537", "0.54153824" ]
0.83500123
0
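The positive document of this record shells out to the docker CLI; the same attach-self-to-network step can be expressed with the docker SDK that several of its negatives use. The sketch below is assumption-laden: the default network name is hypothetical, and the cgroup-based container-id lookup only holds on cgroup-v1 hosts.

import docker

def connect_self_to_network(network_name="subsystem_tests-network"):
    # Assumption: inside a container on a cgroup-v1 host, the first line of
    # /proc/self/cgroup ends with the container id. This is not guaranteed
    # on cgroup-v2 systems.
    with open("/proc/self/cgroup") as fh:
        container_id = fh.readline().strip().rsplit("/", 1)[-1]
    client = docker.from_env()
    # Network.connect accepts a container id or a Container object.
    client.networks.get(network_name).connect(container_id)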
Overrides Die.roll() so that in addition to rolling the dice, it sets the die's value based on the currentValue.
def roll(self): self.currentValue = choice(self.possibleValues) self.value = AngryDie.ANGRY_VALUES[self.currentValue] return self.currentValue
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def roll_dice(self):\n self.roll = (random.randint(1,6), random.randint(1,6))\n return self.roll", "def roll(self):\n #dieValue = [] \n self._value = random.randrange(Die.SIDES) + 1\n self._update()\n #dieValue.append(self._value)\n #print(dieValue)\n #print(self._value)\n self._valueA = random.randrange(Die.SIDES) + 1\n #self._update2()\n #print(self._valueA)", "def test_roll_value_changes(self):\n\n holding_value = self.new_die.roll()\n for i in range(10):\n if self.new_die.roll() != holding_value:\n print(\"Rolled die value {} is different from Holding Value {}\".format(self.new_die.currentValue, holding_value))\n self.assertTrue(True)\n return\n\n self.assertTrue(False, \"Die value did not change from Holding Value for 10 rolls\")", "def test_currentValue_is_updated_to_roll_value(self):\n rolled_value = self.new_die.roll()\n if rolled_value == self.new_die.currentValue:\n self.assertTrue(True, \"currentValue {} matches the rolled value\".format(self.new_die.currentValue))\n return", "def roll(self):\n self.rolled = random.randint(1, 6)\n return self.rolled", "def roll(self):\n self._rollCount += 1\n self._die1.roll()\n self._die2.roll()\n (v1, v2) = (self._die1.getValue(),\n self._die2.getValue())\n self._lastRoll = (v1, v2)\n if self._initialSum == 0:\n initialSum = v1 + v2\n if initialSum in (2, 3, 12):\n return \"LOSE\"\n elif initialSum in (7, 11):\n return \"WIN\"\n else:\n return \"CONTINUE\"\n else:\n sum = v1 + v2\n if sum == 7:\n return \"LOSE\"\n elif sum == initialSum:\n return \"WIN\"\n else:\n return \"CONTINUE\"", "def roll(self):\n self.current_roll = random.randint(self.min, self.max)\n return self.current_roll", "def _rollOneDie(self):\n return random.randint(1, 6)", "def hook_rolldice(self):\n return ui.roll(self)", "def roll(dice):\n rolled_dice = []\n for die in dice[1]:\n rolled_dice.append(randint(1, CUBE_DICE_MAX_VALUE()))\n dice[1] = rolled_dice\n return dice", "def roll(self):\n total = 0\n\n if self.num_dice is not None and self.dice_type is not None:\n for _ in range(self.num_dice):\n total += randint(1, self.dice_type)\n elif self.min_value is not None and self.max_value is not None:\n total = randint(self.min_value, self.max_value)\n\n return total + self.plus", "def roll(self):\n return self._roll", "def roll(self):\n return self._roll", "def rolldice(self):\n raise NotImplementedError()", "def rollDie(self):\n return random.randint(1, self.sides)", "def test_roll_once(self):\n\n self.assertIn(self.new_die.roll(), self.possible_values, \"Rolled value was not in possible die values\")", "def roll_1d10() -> int:\n ten_percent = Die(10)\n ten_percent.roll_die()\n chance = ten_percent.get_value()\n return chance", "def simple_roll(dice):\n return roll(dice).total", "def roll_the_dice(self, dice):\n if type(dice) == list:\n for die in dice:\n die.roll()", "def roll(self):\n\n # Return a random integer between 1 and 6\n return random.randint(1, 6)", "def update_last_roll(self, roll):\n\n # Increment the attribute by the passed value\n self._last_roll = roll", "def die_roll():\n roll = random.randint(1,6)\n return roll", "async def roll(self, ctx, roll: str):\n r = re.compile(r'\\d+[DdWw]\\d+')\n\n if r.match(roll) is None:\n await ctx.send(f'\"{roll}\" is not a valid input, please use e.g. 
1d20 / 3d6 or 1w20 / 3w6')\n else:\n roll_times, roll_sides = self.rollStringToValues(roll)\n roll_values = Dice.roll_XdY(roll_times, roll_sides)\n self._last_roll = roll\n await self.send(ctx, f'Rolling {roll}:\\t{roll_values}')", "def roll(self):\n return randint(1,6)", "def roll(self):\n return cbrandom.throwDices(\"1d20\")", "def roll(self) -> int:\n return self.rand.randint(1, self.sides)", "def record_roll(self, roll):\n if roll == 1:\n self.turn_over = True\n self.score = 0\n else:\n self.score += roll", "def roll_the_dice(self, index):\n # first roll\n first_roll_result = self._rolls_list[index].roll_dice()\n print(f'FIRST ROLL: {first_roll_result}\\n')\n\n # first roll: prompt player to keep, reroll, or select dice\n keep_first_roll = self._rolls_list[index].keep_dice(\n self._players_list[index].name.upper())\n\n # second roll\n second_roll_result = self._rolls_list[index].reroll_dice(\n keep_first_roll)\n print(f'\\nSECOND ROLL: {second_roll_result}\\n')\n\n # second roll: prompt player to keep, reroll, or select dice\n keep_second_roll = self._rolls_list[index].keep_dice(\n self._players_list[index].name.upper())\n\n # third roll\n self.final_roll = self._rolls_list[index].reroll_dice(\n keep_second_roll)\n print(f'\\nFINAL ROLL: {self.final_roll}\\n')", "def roll_dice(self):\r\n return randint(1,self.sides)", "def testRoll(self):\n \n nsides=3\n die = BaseDie(nsides)\n lighted_die = LightedDie(nsides,colors={1:'blue',2:'yellow',3:'gold'})\n\n self.assertEqual(die.last_roll,None)\n\n die.roll()\n lighted_die.roll()\n\n for d in [die,lighted_die]:\n self.assertTrue(d.last_roll>0 and d.last_roll <= nsides)" ]
[ "0.72756755", "0.72380686", "0.71869373", "0.71206564", "0.70240617", "0.69059175", "0.68994266", "0.685288", "0.682175", "0.67554736", "0.66809267", "0.66572595", "0.66572595", "0.6561393", "0.64926106", "0.6468559", "0.6467723", "0.6460948", "0.64546245", "0.64188683", "0.6376875", "0.6365046", "0.63441426", "0.63189715", "0.63153803", "0.63110936", "0.6299218", "0.6240159", "0.62309426", "0.62272483" ]
0.7436479
0
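The roll() document above references an ANGRY_VALUES table and a possibleValues list that live outside the snippet. The following is a hedged reconstruction of the minimum surrounding class needed to run it; the face-to-value mapping is invented for illustration, not taken from the original project.

from random import choice

class AngryDie:
    # Hypothetical face -> display-value table; the real project's contents
    # are not shown in the record.
    ANGRY_VALUES = {1: "1", 2: "2", 3: "ANGRY", 4: "4", 5: "5", 6: "6"}

    def __init__(self):
        self.possibleValues = list(self.ANGRY_VALUES)
        self.currentValue = 1
        self.value = self.ANGRY_VALUES[self.currentValue]

    def roll(self):
        # Pick a face, then derive the display value from it, exactly as in
        # the record's positive document.
        self.currentValue = choice(self.possibleValues)
        self.value = AngryDie.ANGRY_VALUES[self.currentValue]
        return self.currentValue

die = AngryDie()
print(die.roll(), die.value)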
A helper method that, given a valid faceValue, will update the die's currentValue and value to match the passed faceValue.
def setDieFaceValue(self, faceValue): if faceValue in AngryDie.ANGRY_VALUES: self.currentValue = faceValue self.value = AngryDie.ANGRY_VALUES[faceValue]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setFace(self, value):\n self.face = value", "def setFace(self, value):\n self.face = value", "def test_currentValue_is_updated_to_roll_value(self):\n rolled_value = self.new_die.roll()\n if rolled_value == self.new_die.currentValue:\n self.assertTrue(True, \"currentValue {} matches the rolled value\".format(self.new_die.currentValue))\n return", "def set_value(self, new_value):\n temp = new_value\n if temp < self.limits[0]:\n temp = self.limits[0]\n if temp > self.limits[1]:\n temp = self.limits[1]\n self.value = temp\n self.rect = self._as_rect()\n return self.value", "def update_face(self, face):\n\n if face.uuid not in self._faces:\n error_str = \"Trying to update a non-existing face with uuid: \"\\\n + str(face.uuid)\n raise KeyError(error_str)\n\n if not isinstance(face, Face):\n error_str = \"Trying to update an object with the wrong type. \"\\\n + \"Face expected.\"\n raise TypeError(error_str)\n\n face_to_update = self._faces[face.uuid]\n\n face_to_update.data = face.data\n face_to_update.points = face.points", "def updateItem(self, value):\n self.value = value\n self.age = 0\n self.freq += 1", "def __init__(self, face: str, value: int, suit: str):\n self.face = face\n self.value = value\n self.suit = suit", "def update_fouls(self, turn_val):\n\n if turn_val in FOUL_TURNS:\n # Increment fouls_count\n self.fouls_count += 1\n\n # If foul count reached max foul count\n if self.fouls_count == MAX_FOUL_COUNT:\n # Update total_count\n self.total_points += FOUL_PENALTY_POINTS \n # reset the foul count\n self.fouls_count = 0\n \n return True\n else:\n # reset the foul count as current turn is not a foul\n self.fouls_count = 0\n \n return False", "def test_roll_value_changes(self):\n\n holding_value = self.new_die.roll()\n for i in range(10):\n if self.new_die.roll() != holding_value:\n print(\"Rolled die value {} is different from Holding Value {}\".format(self.new_die.currentValue, holding_value))\n self.assertTrue(True)\n return\n\n self.assertTrue(False, \"Die value did not change from Holding Value for 10 rolls\")", "def _update_value(self) -> int:\n\n value_list = [card.value if card.value <= 10 else 10 for card in self]\n hand_value = sum(value_list)\n\n # Checks to see if any Aces can be worth 11 points instead of 1 point\n while value_list.count(1) > 0 and (21 - hand_value) >= 10:\n value_list[value_list.index(1)] = 11\n hand_value = sum(value_list)\n\n self._value = hand_value", "def update_value(self, reward):\n\t\tval = self.value\n\t\tval = val + ((reward - val)/self.visited)\n\t\tself.value = val", "def set_value(self, value):\n if self.value:\n raise ValueError(\"Already has a Value:\", self)\n\n self.value = value\n\n if self.value != 0:\n self.possible = None\n self.solved = True", "def update(self, value):\n if value < self.min:\n self.min = value\n if value > self.max:\n self.max = value\n self.total += value\n self.instances += 1\n self.values.append(value)", "def update(self, val, feats):\n raise NotImplementedError", "def vcjamged(self, whichval, newvalue):\n\n if self.performingupdate or whichval >= self.numcols or type(newvalue) != float:\n return\n\n diff = newvalue - self.currentvalues[whichval]\n if abs(diff) < 0.01:\n return\n\n incr = 0.01\n if diff < 0.0:\n incr = -.01\n\n while newvalue in self.currentvalues:\n newvalue = round(newvalue + incr, 2)\n\n # If we've run off either end, we'll have to go back to where we were\n\n if newvalue < self.minvalue or newvalue > self.maxvalue:\n self.performingupdate = True\n 
self.vspins[whichval].setValue(self.currentvalues[whichval])\n self.performingupdate = False\n return\n\n self.performingupdate = True\n self.currentvalues[whichval] = newvalue\n self.currentvalues.sort()\n self.createrest()\n self.fillingrid()\n self.performingupdate = False\n self.plotmap()", "def update(self, time, frame, face_position = None):\n\t\tself.face_position = face_position or self.face_position", "def set_value(self, val):\n for i, v in enumerate(val):\n if v < self.allowable_min[i]:\n raise ValueError(\"{0}, {1} less than min value {2}, index {3}\".format(self.get_name(), val, self.min_found, i))\n if v > self.allowable_max[i]:\n raise ValueError(\"{0}, {1} greater than max value {2}, index {3}\".format(self.get_name(), val, self.max_found, i))\n\n self.min_found[i] = min(self.min_found[i], v)\n self.max_found[i] = max(self.max_found[i], v)\n\n n = self.count+1\n self.avg_found[i] = self.avg_found[i] * (self.count / n) + v * (1.0 / n)\n\n self.count += 1\n self.value = val", "def set_curr_value(self, val):\n # only goal that is in progress can have it's current value changed\n if self._status != EGoalStatus.IN_PROGRESS:\n raise NotImplementedError('Cannot set value to finished or not started goal')\n # try cast to int - mainly for QuantifiedGoal representation\n val = self.fw.types.try_float_cast(val)\n # update both in the stages object and in raw data\n self._values[EStage.CURRENT] = self._data_process(val)\n self._skeleton.curr_value = val\n # use progressor to update the database\n self._progressor.dump_to_database(self)", "def update_val(self, val):\n self.in_val = val", "def update_reference_value(self, reference_value: float):\n self.__reference_value = reference_value", "def value(self, value):\n self._update_value(value)", "def __update_values(self):\r\n\r\n\t\tv = [0]\r\n\t\thas_ace = False\r\n\r\n\t\t# two values for hands with aces\r\n\t\tfor card in self.cards:\r\n\t\t\tv[0] += card.value\r\n\t\t\tif card.rank == 'Ace':\r\n\t\t\t\thas_ace = True\r\n\r\n\t\t# hand is soft if below 12\r\n\t\tif has_ace:\r\n\t\t\tif v[0] < 12:\r\n\t\t\t\tv.append(v[0] + 10)\r\n\r\n\t\tself.values = v", "def update(self, val):\n self.current_val = val\n self.redraw()", "def _set_value(self, value):\n if value is undefined:\n self._status = 3 if (self._count == 0) else 0\n return # new tick, but no update of value\n self._last_value = self._value\n self._value = value\n self._count += 1\n self._last_timestamp = self._timestamp\n self._timestamp = time.time()\n self._status = 0\n if self._ob is not None:\n ob = self._ob()\n if hasattr(ob, '_signal_changed'):\n ob._signal_changed(self)", "def _update_stats(self, value):\n solver = self.solver\n is_better = solver.sense.is_better\n if isinstance(value, Infeasible):\n self.infeas_count += 1\n if value < self.least_infeas_value:\n self.least_infeas_value = value\n solver.channel.emit(solver.SIGNALS.LEAST_INFEAS_VALUE_CHANGED)\n if value > self.most_infeas_value:\n self.most_infeas_value = value\n solver.channel.emit(solver.SIGNALS.MOST_INFEAS_VALUE_CHANGED)\n else:\n self.feas_count += 1\n if is_better(value, self.best_feas_value):\n self.best_feas_value = value\n solver.channel.emit(solver.SIGNALS.BEST_FEAS_VALUE_CHANGED)\n if is_better(self.worst_feas_value, value):\n self.worst_feas_value = value\n solver.channel.emit(solver.SIGNALS.WORST_FEAS_VALUE_CHANGED)\n if is_better(value, self.best_value):\n self.best_value = value\n solver.channel.emit(solver.SIGNALS.BEST_SOL_VALUE_CHANGED)\n if is_better(value, solver.incumbent):\n 
solver.incumbent = value", "def roll(self):\n self.currentValue = choice(self.possibleValues)\n self.value = AngryDie.ANGRY_VALUES[self.currentValue]\n return self.currentValue", "def updateValue(self):\n if len(self.__XValue) > 0:\n # TODO: Should be calling the base __append method\n self.values.append((self.__XValue[-1] + self.__offset) % 360)\n self.lastUpdate = time.time()", "def changeValue(situation , valueToPlay, player):\r\n situation[valueToPlay[0]][valueToPlay[1]] = Player.get_spec(player)\r\n return situation", "def _update(self, handle, value):\n _LOGGER.debug(\n \"%s: %15s temperature = %-2d.%-2d, humidity = %3d\",\n handle,\n self.name,\n value[0],\n value[2],\n value[1],\n )\n self.data[\"temp\"] = float(\"%d.%d\" % (value[0], value[2]))\n self.data[\"humid\"] = value[1]", "def test_value_can_be_changed(self):\n value = 30\n self.progressbar.setValue(value)\n self.assertEqual(self.progressbar.getValue(), value)\n\n # TODO: should we check for variable type to avoid app crashes ?\n # NOTE: weirdly enough, the sliders don't crash like this; this may\n # be a bug in libui.\n # with self.assertRaises(ValueError):\n # self.progressbar.setValue('hello')" ]
[ "0.568083", "0.568083", "0.5458344", "0.54526615", "0.52289414", "0.5217452", "0.5184717", "0.5160611", "0.51151603", "0.511195", "0.50905704", "0.506557", "0.50020486", "0.49880826", "0.49478003", "0.49093857", "0.4904442", "0.48498568", "0.48484898", "0.4839955", "0.48307022", "0.48210955", "0.48198763", "0.4813514", "0.48130748", "0.4805555", "0.4801145", "0.48010918", "0.47877353", "0.47626305" ]
0.81279725
0
Roll the dice passed in the list.
def roll_the_dice(self, dice):
    if type(dice) == list:
        for die in dice:
            die.roll()
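A minimal usage sketch for the document above, assuming a hypothetical Die stand-in; roll_the_dice only requires that each list element expose a roll() method, so any duck-typed die works.

import random

class Die:
    # Hypothetical stand-in: only roll() is required by roll_the_dice.
    def __init__(self):
        self.value = None

    def roll(self):
        # Pick a face uniformly at random and remember it.
        self.value = random.randint(1, 6)
        return self.value

class Game:
    # The method from the record, unchanged.
    def roll_the_dice(self, dice):
        if type(dice) == list:
            for die in dice:
                die.roll()

game = Game()
dice = [Die(), Die()]
game.roll_the_dice(dice)        # every die in the list gets a fresh value
print([d.value for d in dice])  # e.g. [4, 1]

Note the guard: a non-list argument is silently ignored rather than raising.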
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def roll(dice):\n rolled_dice = []\n for die in dice[1]:\n rolled_dice.append(randint(1, CUBE_DICE_MAX_VALUE()))\n dice[1] = rolled_dice\n return dice", "def roll_the_dice(self, index):\n # first roll\n first_roll_result = self._rolls_list[index].roll_dice()\n print(f'FIRST ROLL: {first_roll_result}\\n')\n\n # first roll: prompt player to keep, reroll, or select dice\n keep_first_roll = self._rolls_list[index].keep_dice(\n self._players_list[index].name.upper())\n\n # second roll\n second_roll_result = self._rolls_list[index].reroll_dice(\n keep_first_roll)\n print(f'\\nSECOND ROLL: {second_roll_result}\\n')\n\n # second roll: prompt player to keep, reroll, or select dice\n keep_second_roll = self._rolls_list[index].keep_dice(\n self._players_list[index].name.upper())\n\n # third roll\n self.final_roll = self._rolls_list[index].reroll_dice(\n keep_second_roll)\n print(f'\\nFINAL ROLL: {self.final_roll}\\n')", "def roll_dice(self):\n self.roll = (random.randint(1,6), random.randint(1,6))\n return self.roll", "def rollDices():\n for i in range(5):\n dices[i] = randint(1, 6)", "def roll_dice():\n numbers = ['1', '2', '3', '4', '5', '6']\n return random.choice(numbers)", "def roll(self, *diceNums):\n if not diceNums:\n diceNums = list(range(0,5))\n for i in diceNums:\n self._dice[i - 1] = random.randint(1,6);\n self._dice = Dice.normalize(self._dice)", "def roll(self):\n return tuple(d.roll() for d in self.dice) ## note: len(result) == 2 always", "def roll(self):\n rolls = []\n if self.dice_array is not None:\n for dice in self.dice_array:\n rolls.append(np.random.randint(1, dice+1))\n else:\n for _ in range(0,self.number):\n rolls.append(np.random.randint(1, self.sides+1))\n #Fast way from stack overflow to determine if all\n #entries in \"rolls\" are equal, i.e. when doubles are rolled\n #but for arbitrary number of dice\n doubles = not rolls or [rolls[0]]*len(rolls) == rolls\n return np.sum(rolls), rolls, doubles", "def rolldice(self):\n raise NotImplementedError()", "def test_reroll_dice(self):\n self.roll.current_dice_list = [1, 2, 3, ]\n self.roll.keeper_dice_list = [1, 2]\n\n self.roll.reroll_dice(self.roll.current_dice_list)\n\n self.assertEqual(len(self.roll.current_dice_list), 5)\n self.assertEqual(len(self.roll.keeper_dice_list), 0)\n self.assertEqual(self.roll.current_dice_list[3], 1)\n self.assertEqual(self.roll.current_dice_list[4], 2)", "def roll(self):\n results = []\n\n if self.dice > 20:\n raise DiceError(\"Oh là, cuistre! 
Pas plus de 20 dés, ou il va \" +\n \"t’arriver des bricoles!\")\n\n for die in range(self.dice):\n results.append(self._rollOneDie())\n\n results.sort(reverse=self.keep_max)\n\n total = sum(results[0:self.keeps])\n successes = floor((total - self.target) / 5)\n\n # something occures bad here\n return \"%s Total: %d\" % (\n str(results), sum(results[0:self.keeps]) + self.bonus), successes", "def roll_dices():\n dices = []\n\n for i in range(DICE_COUNT):\n dice = random.randrange(MIN_DICE, MAX_DICE + 1)\n dices.append(dice)\n\n return dices", "def hook_rolldice(self):\n return ui.roll(self)", "def simple_roll(dice):\n return roll(dice).total", "async def roll_sw(ctx, dice):\n await ctx.send(display_counts(resolve_sw(roll_many(dice, SW_DICE))))", "def test_roll_dice(self):\n # create partial current and keeper list to pass into roll_dice\n self.roll.current_dice_list = [1, 2, 3]\n self.roll.keeper_dice_list = [1, 2, 3]\n\n self.roll.roll_dice()\n\n self.assertEqual(len(self.roll.current_dice_list), 5)\n self.assertEqual(len(self.roll.keeper_dice_list), 0)\n\n for i, dice in enumerate(self.roll.current_dice_list):\n self.assertTrue(1 <= dice <= 6)", "def roll_dice(self, number_of_dice, size_of_dice):\n # makes a list of random numbers based on the information\n # that was put in\n dice = []\n for roll in range(int(number_of_dice)):\n roll = random.randint(1, int(size_of_dice))\n dice.append(roll)\n\n # Checks wether the result needs to be sorted or not\n if self.sort is True:\n dice.sort()\n # Turns ints into strings after sorting\n converted_dice = []\n for i in range(len(dice)):\n roll_to_convert = dice[i]\n roll_to_convert = str(roll_to_convert)\n converted_dice.append(roll_to_convert)\n # Sets the last roll flag and returns to sort flag to false\n self.last_roll = converted_dice\n self.sort = False\n # Sets the last roll flag for easy cross function use.\n else:\n # Turns Ints into strings incase it had to be sorted\n converted_dice = []\n for i in range(len(dice)):\n roll_to_convert = dice[i]\n roll_to_convert = str(roll_to_convert)\n converted_dice.append(roll_to_convert)\n self.last_roll = converted_dice", "def rollDie():\n return random.choice([1, 2, 3, 4, 5, 6])", "def roll_dice(num_rolls, dice=six_sided):\n # These assert statements ensure that num_rolls is a positive integer.\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\n assert num_rolls > 0, 'Must roll at least once.'\n # BEGIN PROBLEM 1\n num_roll = 0\n sum = 0\n pig_out = False # Pig Out rule\n while num_roll < num_rolls:\n roll = dice()\n if roll == 1:\n pig_out = True\n sum += roll\n num_roll += 1\n if pig_out: return 1\n else: return sum\n # END PROBLEM 1", "def roll_die(self, number_of_rolls):\n\t\tfor roll in range(0, number_of_rolls):\n\t\t\tprint(str(randint(1, self.sides)), end = \", \")\n\t\tprint()", "def roll_dice(num_of_dice=1):\r\n sides = 6\r\n return [random.randrange(1, sides+1) for _ in xrange(num_of_dice)]", "def roll_dice(num_rolls, dice=six_sided):\n # These assert statements ensure that num_rolls is a positive integer.\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\n assert num_rolls > 0, 'Must roll at least once.'\n # BEGIN PROBLEM 1\n \"*** YOUR CODE HERE ***\"\n count, return_sum = 0, 0\n while count < num_rolls:\n roll = dice()\n if roll == 1:\n count += 1\n while count < num_rolls:\n dice()\n count += 1\n return 1\n return_sum += roll\n count += 1\n return return_sum\n # END PROBLEM 1", "def roll(count=2):\n # Generate a list of dice\n try:\n count = int(count)\n 
except ValueError:\n print(\"%s is not an integer\" % count)\n return -1\n if count < 1:\n raise ValueError(\"You must have at least one die\")\n print(\"Rolling {0} dice\".format(count))\n dice = [Die() for i in range(count)]\n total = 0\n idx = 0\n for d in dice:\n d.throw()\n idx += 1\n d.log.info(\"Die {} was {}\".format(idx, d.get_value()))\n total += d.get_value()\n if count == 2 and dice[0].get_value() == dice[1].get_value():\n print(\"Doubles!!\")\n print(\"Total number rolled is {}\".format(total))\n return tuple([d.get_value() for d in dice])", "async def roll_l5r(ctx, dice):\n await ctx.send(display_counts(resolve_l5r(roll_many(dice, L5R_DICE))))", "async def roll(self, ctx, roll: str):\n r = re.compile(r'\\d+[DdWw]\\d+')\n\n if r.match(roll) is None:\n await ctx.send(f'\"{roll}\" is not a valid input, please use e.g. 1d20 / 3d6 or 1w20 / 3w6')\n else:\n roll_times, roll_sides = self.rollStringToValues(roll)\n roll_values = Dice.roll_XdY(roll_times, roll_sides)\n self._last_roll = roll\n await self.send(ctx, f'Rolling {roll}:\\t{roll_values}')", "def roll_dice():\n return (random.randint(1, 6) + random.randint(1, 6))", "async def roll(self, dice: str):\n try:\n rolls, limit = map(int, dice.split('d'))\n except Exception:\n await self.bot.say('Format has to be in NdN!')\n return\n\n result = ', '.join(str(random.randint(1, limit)) for r in range(rolls))\n await self.bot.say(result)", "def roll_dices(self):\n dice1 = random.randint(1, 6)\n dice2 = random.randint(1, 6)\n\n self.client.send_player_end_dices()\n self.game.player_rolled_dices([dice1, dice2])\n asyncio.ensure_future(self.move(dice1 + dice2))", "async def roll(self, ctx: commands.context, dice: str):\n\n if dice:\n lexer = DiceLexer()\n parser = DiceParser()\n\n try:\n step_data, result = parser.parse(lexer.tokenize(dice))\n except TypeError:\n await ctx.send(\"There was an error with your roll syntax. Please try again.\")\n return\n\n if result.is_integer():\n result = int(result)\n\n color = ConfiguredCog.convert_color(ConfiguredCog.config['content']['dice_result_embed_color'])\n title = f'Roll for {ctx.author.name}'\n description = f'**Result:**\\n' \\\n f'```\\n' \\\n f'{result}\\n' \\\n f'```\\n' \\\n f'**Steps:**\\n' \\\n f'```\\n'\n for step in step_data:\n description += step + '\\n'\n description += '```'\n\n embed = Embed(color=color, title=title, description=description)\n\n await ctx.send(embed=embed)", "def roll_dice(self):\r\n return randint(1,self.sides)" ]
[ "0.78297305", "0.76451164", "0.76148355", "0.7406287", "0.73938185", "0.73751354", "0.7322575", "0.7311594", "0.7302911", "0.72434473", "0.7187141", "0.71589607", "0.7152758", "0.71263856", "0.7106611", "0.7098338", "0.7085768", "0.70856994", "0.7061517", "0.70429206", "0.70402086", "0.7022989", "0.6996688", "0.6983048", "0.69810325", "0.69800955", "0.6974681", "0.6967556", "0.69659084", "0.69282293" ]
0.8384107
0
Print both die values, as well as the current stage.
def print_dice(self):

    stage_to_print = 3 if self.current_stage == 4 else self.current_stage
    print("You rolled:\n a = [ {} ]\n b = [ {} ]\n\nYou are in Stage {}"
          .format(self.die_a, self.die_b, stage_to_print))
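A small self-contained harness, assuming a hypothetical Die whose str() is its face value (attribute names follow the surrounding records). It shows why the method clamps stage 4 to 3 for display: stage 4 is the internal "won" state, while the game only advertises Stages 1-3.

class Die:
    # Hypothetical stand-in whose str() is its face value.
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return str(self.value)

class Game:
    def __init__(self, stage):
        self.die_a = Die(5)
        self.die_b = Die(6)
        self.current_stage = stage

    # The method from the record, unchanged.
    def print_dice(self):
        stage_to_print = 3 if self.current_stage == 4 else self.current_stage
        print("You rolled:\n a = [ {} ]\n b = [ {} ]\n\nYou are in Stage {}"
              .format(self.die_a, self.die_b, stage_to_print))

Game(stage=4).print_dice()  # still reports "Stage 3"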
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_current_dice(self):\n print(\"You rolled:\\n a = [ {} ]\\n b = [ {} ]\\n\".\n format(self.die_a, self.die_b))", "def print_hand(self):\n if self.cheating:\n print(\"You're cheating!\")\n print(\"until you reroll it!\")\n print(\"\"\"\nYou rolled:\na = [ {} ]\nb = [ {} ]\n\nYou are in Stage {}\n \"\"\".format(self.die_a, self.die_b, self.stage))", "def print_values(self):\n print \"Money %s, Attack %s\" % (self._money, self._attack)", "def Death_Blossom(self):\t\t\n\t\tprint(self.name.Title() + \"Die Die Die!\")", "def print_state(self):\n print(self.identifier, \n self.gender, \n self.age,\n self.sexual_activity,\n self.disease_status,\n self.time_since_infection,\n self.number_of_partners,\n self.current_partners)", "def print(self):\n if self.passed():\n self.print_passed()\n else:\n self.print_failed()", "def _print(self):\n self.i += 1\n np.set_printoptions(precision=3, suppress=True)\n if self.i%40 == 0:\n self.i = 0\n print self.ekf.current_state_estimate[4:7]", "def print_env_information(step_id, current_time, final_move, current_score, current_reward):\n print(\"Step: {}\".format(step_id))\n print(\"Current Time: {}\".format(current_time))\n print(\"Action: {}\".format(final_move))\n print(\"Current scenario score: {} \\nCurrent reward: {}\\n\".format(current_score, current_reward))", "def debug():\r\n global CurrentState\r\n global CurrentInput\r\n global RESPONSEOPTIONS\r\n print(\"___________________________\")\r\n for state in RESPONSEOPTIONS:\r\n score = calcTotalScore(state, CurrentInput, CurrentState)\r\n print(state.id + \": \" + str(score) + \" ,\", end=\"\")\r\n print(\"\\n___________________________\")", "def print_outcome(self) -> None:\n pass", "def print_passed(self):\n if self.instance_type == \"FILE\":\n print(colored(\"PASS\", \"green\") + f\" | [{self.instance_type}] {self.instance_location}/{self.instance_name}\")\n\n if self.instance_type == \"HOST\":\n print(\n colored(\"PASS\", \"green\")\n + f\" | [{self.instance_type}] {self.instance_hostname} [SCHEMA ID] {self.schema_id}\"\n )", "def print_out():\n pass", "def __str__( self ):\n\n return \"Die1: %s\\nDie2: %s\" % ( str(self.die1), str(self.die2) )", "def eprint(*pargs, **kargs):\n print('\\u001b[31m', end='', file=sys.stderr)\n print(*pargs, file=sys.stderr, **kargs)\n print('\\u001b[0m', end='', file=sys.stderr)", "def print_status(self):\n print \"Zombie has\" + super(Zombie, self).print_status()", "def print(self):\n for var in self.variables:\n print(var)", "def CatchPhase(self):\n print(\"{} said: Ain't that just the way!\".format(self.__name)) # str formatting format()\n # print(\"%s said: Ain't that just the way!\" % (self.__name)", "def printState(self,board):\n self.printBoard(board.getBoard())\n self.printScore(board,board.getScore())", "def main(self):\n self.display_welcome_message()\n print(self.die_a.__dict__)\n # Once the user hits ENTER the game starts.\n input(\"Press ENTER to start!\")\n # Roll both dice at the beginning of the game\n self.roll_dice([\"a\", \"b\"])\n # If player rolled two \"ANGRY\" dice, display 'you are angry'\n # message and reset stage value.\n self.handle_angry_dice()\n # Advance to Stage 2 if rolled 1 and 2\n if self.is_advancing_to_next_stage():\n self.game_stage += 1\n # Display current stage\n print(\"You are in Stage {}\".format(self.game_stage))\n # Loop on sequence below until user wins\n while not self.game_won:\n # Get user to select dice to roll.\n dice_to_roll = self.process_user_input()\n # Handle cheating\n 
self.register_player_cheating(self.die_a, dice_to_roll)\n self.register_player_cheating(self.die_b, dice_to_roll)\n # Display message to indicate if the player tried to cheat\n if self.just_cheated_a and self.just_cheated_b:\n print(\"You're cheating! You cannot win until you reroll both \"\n \"dice!\")\n elif self.just_cheated_a or self.just_cheated_b:\n print(\"You're cheating! You cannot lock a 6! You cannot win \"\n \"until you reroll it!\")\n # If user attempts to illegally hold die 'a' in current stage ...\n if \"a\" not in dice_to_roll and self.is_die_held_in_wrong_stage(self.die_a):\n # Force rolling die 'a'\n dice_to_roll.append(\"a\")\n # If user attempts to illegally hold die 'b' in current stage ...\n if \"b\" not in dice_to_roll and self.is_die_held_in_wrong_stage(self.die_b):\n # Force rolling die 'b'\n dice_to_roll.append(\"b\")\n # Roll dice.\n self.roll_dice(dice_to_roll)\n # If player rolled two \"ANGRY\" dice, display \"you are angry\"\n # message and reset stage value.\n self.handle_angry_dice()\n # If player won, update game_won attribute break out of this loop.\n if self.is_game_over():\n self.game_won = True\n break\n # If player advances to next stage, increment game_stage attribute\n elif self.is_advancing_to_next_stage():\n self.game_stage += 1\n # Display current stage\n print(\"You are in Stage {}\".format(self.game_stage))\n # Player won! Display victory message!\n print(\"You've won! Calm down!\")", "def printMe(self):\n tempDict = self.whoAreYou()\n for key in tempDict.keys():\n self.raiseADebug(' {0:15}: {1}'.format(key,str(tempDict[key])))\n tempDict = self.getInitParams()\n self.raiseADebug(' Initialization Parameters:')\n for key in tempDict.keys():\n self.raiseADebug(' {0:15}: {1}'.format(key,str(tempDict[key])))\n tempDict = self.myCurrentSetting()\n self.raiseADebug(' Current Setting:')\n for key in tempDict.keys():\n self.raiseADebug(' {0:15}: {1}'.format(key,str(tempDict[key])))", "def display_hall_of_fame(self) -> None:\n print(\"Hall of fame\")\n for env, dico in self.score_dic.items():\n print(\"Environment :\", env)\n for team, score in sorted(dico.items()):\n print(\"team: \", team, \"mean: \", score[0], \"std: \", score[1])", "def dump_sweep(self,status):\n L = self.level\n logger = logging.getLogger('root')\n logger.info('Process %2i on time %8.6f at stage %15s: Level: %s -- Iteration: %2i -- Residual: %12.8e',\n status.slot,status.time,status.stage,L.id,status.iter,L.status.residual)\n\n stats.add_to_stats(step=status.step, time=status.time, level=L.id, iter=status.iter,\n type='residual', value=L.status.residual)\n\n pass", "def print_debug(context: str = \"\") -> None:\r\n print(context)\r\n print(\"This is the current board\")\r\n print(example)\r\n print(\"This is the conflict space\")\r\n print(conflict_space)\r\n print(\"This is the safeboard\")\r\n print(safeboard)", "def print_post():\n print('| | |'),", "def print_nice(self):\n print(\"- \" + str(self.__node_a.name) + \" (\" + self.__node_a.get_value_string() +\n \") -> \" + str(self.__node_b.name) + \" (\" + self.__node_b.get_value_string() + \")\")", "def print_output(self):\n print(\"Reference score: \" + str(self.PotTax_reference.sum().TFI))\n print(\"Intervention score: \" + str(self.PotTax_intervention.sum().TFI))\n return", "def dft_print(self):\n #print(self.value)\n #if self.left:\n # self.left.dft_print()\n #if self.right:\n # self.right.dft_print()\n stack = []\n stack.append(self)\n while len(stack):\n current = stack.pop()\n print(current.value)\n if current.left:\n 
stack.append(current.left)\n if current.right:\n stack.append(current.right)", "def print_state(X):\n out = ''\n for coord in range(18):\n out += \"{0}\".format(STATE_VARS[coord])\n val = float(X[coord])\n out += \" {0: 2.4e}\\n\".format(val)\n\n print out", "def debug(self):\n \n #path\n print('Path information:')\n for k, v in self.__path.items():\n print(k, v)\n \n #sample count\n print('Sample statistic of each phase')\n for k, v in self.__phase_sample_count.items():\n print(k, v)\n \n print('Sample statistic of each class')\n for k, v in self.__area_sample_count.items():\n print(k, v)\n \n print('Sample statistic of each train')\n for k, v in self.__train_sample_count.items():\n print(k, v)", "def print_state(self):\n\t\tprint self.time, len(self.state['s']), len(self.state['p']), len(self.state['c'])" ]
[ "0.6175284", "0.5993719", "0.5895301", "0.5708749", "0.56729364", "0.5624648", "0.5600944", "0.55977625", "0.55730206", "0.5530365", "0.5493459", "0.5430469", "0.538948", "0.53874636", "0.5384385", "0.53282684", "0.52871954", "0.5283577", "0.527334", "0.52729154", "0.5268641", "0.52593523", "0.5212587", "0.52058005", "0.5199135", "0.519652", "0.51936406", "0.51798004", "0.51793265", "0.5172763" ]
0.7377811
0
Prompt the user for input, and return the dice they want to roll.
def determine_roll(self):
    dice_to_roll = []
    to_roll = input("Roll dice: ")

    if 'a' in to_roll:
        dice_to_roll.append(self.die_a)
    if 'b' in to_roll:
        dice_to_roll.append(self.die_b)

    return dice_to_roll
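The method couples parsing to input(), which makes it awkward to test; a hypothetical pure-function variant (not from the record) isolates the 'a'/'b' membership check.

def parse_roll_request(text, die_a, die_b):
    # Same substring test as determine_roll, minus the input() call.
    dice_to_roll = []
    if 'a' in text:
        dice_to_roll.append(die_a)
    if 'b' in text:
        dice_to_roll.append(die_b)
    return dice_to_roll

# Substring membership, not exact tokens: 'ba' or even 'banana' selects both dice.
assert len(parse_roll_request('ba', 'A', 'B')) == 2
assert parse_roll_request('x', 'A', 'B') == []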
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def roll_dice(player: int) -> int:\n sides = 6\n roll_again = input(\"Player {}: Press ENTER to roll your dice...\".format(player))\n num_rolled = roll(sides)\n print(\"You rolled {}.\".format(num_rolled))\n return num_rolled", "def dice_roller():\n\n print('Use the xDy+z format to roll the dice. Example: \"2D6+4\"')\n roll = input(\"Enter the dice: \").lower()\n\n if roll.count('d') > 1:\n return \"Wrong input!\"\n\n validate = roll.replace(\"d\", \"\").replace(\"+\", \"\").replace('-', \"\")\n\n # \"validate\" variable is made just for checking if input is correct, by trying to convert itself into a integer.\n # This allows us to find if there are any unwanted characters, because we got rid of +, - and d which are essential for the roll.\n # Even if there are multiple of + or - signs, we can catch them either later in function or in other 3 functions.\n\n if not isinstance(roll, str):\n return \"Wrong input!\"\n\n try:\n validate = int(validate)\n except ValueError:\n return \"Wrong input!\"\n\n # Checking if the input is correct, so that user can only use xDy+z format\n # and is informed if they input something wrong.\n\n dice = dice_func(roll)\n amount_of_dice = amount_func(roll)\n modifier = mod_func(roll)\n\n if dice == \"Wrong input!\" or modifier == \"Wrong input!\":\n return \"Wrong input!\"\n\n # Checking if the other functions did not return \"Wrong input!\" statement\n\n else:\n result = 0\n for amount in range(amount_of_dice):\n result += randint(1, dice)\n \n # The last loop simulates the rolling of the dice, each one individually, just like in an RPG game.\n\n return result + modifier", "def process_user_input(self, roll_dice=None):\n # By default ask user which dice to roll.\n if roll_dice is None:\n user_input = input(\"Roll dice:\")\n # During testing use the input argument for dice to roll.\n else:\n user_input = roll_dice\n # Start with empty list of dice to roll\n dice_to_roll = []\n if \"a\" in user_input:\n dice_to_roll.append(\"a\")\n if \"b\" in user_input:\n dice_to_roll.append(\"b\")\n return dice_to_roll", "def diceRoll():\n return random.randint(1, 6) # generates a random integer between 1 and 6 (inclusive) and returns it.", "def dice_roll(name):\n roll = random.randint(1,6)\n print \"{name} shook the die \\\nand rolled a {roll}.\".format(name=name, roll=roll)\n return roll", "def dice_roll(name):\n roll = random.randint(1,6)\n print \"{name} shook the die \\\nand rolled a {roll}.\".format( name=name, roll=roll)\n return roll", "def roll_or_hold(self):\n decide_to_roll = input (\"Would you like to roll the die or hold? 
(Enter anything) \")\n if Dice.rollDie() == 1:\n return \"Turn over\"\n if decide_to_roll != \"\":\n print (\"Hey\")\n return True\n else:\n print (\"No\")\n return False", "def dice_func(die):\n\n viable_dice = ('d3', 'd4', 'd6', 'd8', 'd100', 'd12', 'd20', 'd10')\n dice = \"\"\n\n for d in viable_dice:\n if d in die:\n dice = d\n break\n\n if len(dice) == 0:\n return \"Wrong input!\"\n\n try:\n dice = int(dice.replace('d', \"\"))\n except (TypeError, ValueError):\n return \"Wrong input!\"\n return dice", "def roll_dice():\n roll = random.randint(1, 6)\n return roll", "async def dice(self, ctx, diceroll: str = '1d6'):\n times, num = diceroll.split('d')\n times = int(times) if times else 1\n num = int(num) if num else 6\n maxscore = times*num\n score = random.randint(times, maxscore)\n await ctx.send(ctx._(\"roll_result\").format(score=score, maxscore=maxscore))", "def roll_dice():\n numbers = ['1', '2', '3', '4', '5', '6']\n return random.choice(numbers)", "def roll_dice():\n print(colored(\"Lanzando tu dado...\", \"green\", attrs=['bold']))\n while True:\n dice = random.randint(1, 6)\n if dice != 3:\n return dice\n else:\n print(colored(\"Tu dado es 3, lancemos de nuevo\", \"green\", attrs=['bold']))\n continue", "def roll(*args):\n try:\n sides = int(args[0])\n except:\n return \"I need an integer if you want me to roll these dice.\"\n return \"The result is...{0}!\".format(randint(1, sides))", "def rollDie():\n return random.choice([1, 2, 3, 4, 5, 6])", "def ask_dice():\n while True:\n res = input(\"Deseas lanzar tu dado? [yes / no]: \")\n if res == \"yes\":\n return True\n elif res == \"no\":\n print(\"Debes lanzar! Prueba tu suerte...\")\n else:\n print(\"Responde YES o NO... no seas cobarde...\")", "def diceRoll():\n return randint(1,6)", "def pick_dice(sorted_dice):\n print(f'\\nYour sorted dice result is: {sorted_dice}')\n player_picks = input(fill('Here is your sorted dice result. Please enter 1-4 unique numbers in the range of 1-5 to'\n ' represent the selection of dice you want to hold. the numbers represents the location '\n 'of die in the dice list from left to right. For example if you want to hold 2 dice that '\n 'are on the left of the sorted dice list, you will enter \"12\". Warning: if you enter '\n 'anything else, the system will treat it as if you choose not to hold any dice: ',\n TXT_WIDTH()))\n dice = [[], []]\n if re.match(r'^(?!.*(.).*\\1)[1-5]{1,4}$', player_picks):\n picks_list = [int(pick) for pick in player_picks]\n index_list = [pick - 1 for pick in picks_list]\n for index in index_list:\n dice[0].append(sorted_dice[index])\n for die in range(TOTAL_NUMBER_OF_DICE() - len(dice[0])):\n dice[1].append(0)\n else:\n for die in sorted_dice:\n dice[1].append(0)\n return dice", "def interactive_strategy(score, opponent_score):\r\n print('Current score:', score, 'to', opponent_score)\r\n while True:\r\n response = input('How many dice will you roll? 
')\r\n try:\r\n result = int(response)\r\n except ValueError:\r\n print('Please enter a positive number')\r\n continue\r\n if result < 0:\r\n print('Please enter a non-negative number')\r\n else:\r\n return result", "def roll(dice):\n\n dice = str(dice).upper().strip()\n dice_mod = 0\n if dice == 'FLUX':\n return randint(1, 6) - randint(1, 6)\n else:\n if dice == 'GOODFLUX':\n flux1 = randint(1, 6)\n flux2 = randint(1, 6)\n if flux1 < flux2:\n return flux2 - flux1\n else:\n return flux1 - flux2\n else:\n if dice == 'BADFLUX':\n flux1 = randint(1, 6)\n flux2 = randint(1, 6)\n if flux1 > flux2:\n return flux2 - flux1\n else:\n return flux1 - flux2\n \n ichar1 = dice.find('DD')\n if ichar1 == -1:\n ichar1 = dice.find('D')\n if ichar1 == 0:\n num_dice = 1\n\n if ichar1 <> -1:\n if ichar1 <> 0:\n num_dice = int(dice[0:ichar1])\n# print 'Number of dice =', num_dice\n ichar2 = dice.find('+')\n if ichar2 <> -1:\n dice_mod = int(dice[ichar2:len(dice)])\n# print 'dice mod =', dice_mod\n else:\n ichar2 = dice.find('-')\n if ichar2 <> -1:\n dice_mod = int(dice[ichar2:len(dice)])\n# print 'dice mod =', dice_mod\n\n if ichar2 <> -1:\n dice_type = dice[ichar1: ichar2]\n dice_type = dice_type.rstrip()\n else:\n dice_type = dice[ichar1: len(dice)]\n# print 'dice type =', dice_type, 'Len = ', len(dice_type)\n\n if dice_type == 'D6': \n return die_rolls(6, num_dice) + dice_mod\n else:\n if dice_type == 'D66' and num_dice == 1 and dice_mod == 0:\n return randint(1, 6) * 10 + randint(1, 6)\n else:\n if dice_type == 'D100' and num_dice == 1: \n return (randint(1, 10) - 1) * 10 + randint(1, 10) + dice_mod \n else:\n if dice_type == 'D10': \n return die_rolls(10, num_dice) + dice_mod\n else: \n if dice_type == 'D20': \n return die_rolls(20, num_dice) + dice_mod\n else:\n if dice_type == 'D30': \n return die_rolls(30, num_dice) + dice_mod\n else:\n if dice_type == 'D12': \n return die_rolls(12, num_dice) + dice_mod\n else:\n if dice_type == 'D8': \n return die_rolls(8, num_dice) + dice_mod\n else:\n if dice_type == 'D4': \n return die_rolls(4, num_dice) + dice_mod\n else:\n if dice_type == 'D9': \n return die_rolls(9, num_dice) + dice_mod\n else:\n if dice_type == 'D3': \n return die_rolls(3, num_dice) + dice_mod\n else:\n if dice_type == 'DD':\n return (die_rolls(6, num_dice) + dice_mod) * 10\n \n print\n print \"** DICE ERROR! 
'%s' is unknown **\" % dice\n print \n print \"roll() is a dice rolling program.\"\n print\n print \"The types of dice to roll are (in string values):\"\n print \"roll('D6') -- roll one 6-sided die\"\n print \"roll('1D6') -- roll one 6-sided die\"\n print \"roll('2D6') -- roll two 6-sided dice\"\n print \"roll('D10') -- roll a 10-sided die\"\n print \"roll('D100') -- roll a 100-sided die (1 - 100)\"\n print \"roll('D66') -- roll for a D66 chart\"\n print \"roll('2DD+3') -- roll (2D6+3) x 10\"\n print\n print \"-/+ DMs can be added to rolls:\"\n print \"roll('3D6+6') -- add +6 DM to roll\"\n print \"roll('4D4-4') -- add -4 DM to roll\"\n print\n return 0", "def dice(name):", "def roll_input(self, user_input, optional_input):\n\n # Resets the status of everything before a new roll\n self.last_roll = []\n self.result = 0\n self.hidden = False\n # An empty error flag to easily throw errors back through discord\n self.error = ''\n self.number_of_dice = ''\n self.size_of_dice = ''\n # The modifier is either a + or a -, stored for easy acces\n self.modifier = ''\n self.modifier_number = ''\n self.dropped_roll = ''\n\n # this code is run in try to catch atribute errors due to a wrong input\n try:\n # All parts filtered by regex get stored in an object,\n # that then gets split\n split_input = dice_handler.match(user_input)\n # sets the number of dice in the class for use in other functions\n self.number_of_dice = split_input.group(1)\n # sets the size of the dice in the class for use in other functions\n self.size_of_dice = split_input.group(3)\n # sets the +/- in the class for use in other functions\n self.modifier = split_input.group(5)\n # sets the number of the mod in the class to use in other functions\n self.modifier_number = split_input.group(6)\n\n # An if statements that alows typing 1 for rolling 1 dice to be\n # optional.\n if self.number_of_dice == '':\n self.number_of_dice = 1\n # Makes sure atleast 1 dice is rolled\n if self.number_of_dice == '0':\n self.error = \"You can't roll 0 dice\"\n # Sets a cap of 200 dice being rolled\n if int(self.number_of_dice) > 200:\n self.error = \\\n 'No! Thats to many dice I do not have that many!!!'\n return\n\n # Meant to catch errors where a none size dice managed to sneak\n # Through.\n if self.size_of_dice == '0' or self.size_of_dice is None:\n self.error = \"Please define the dice size.\"\n # Sets a cap on how large of a dice you can roll.\n if int(self.size_of_dice) > 50000:\n self.error = \"Dice too big!\" + \\\n \" That has gotta be fake nothing goes this high\"\n return\n\n # Checks wether no modifier was entered or if it was incorrectly\n # entered by checking the lenght of the input vs what came through.\n if self.modifier is None and \\\n len(str(user_input)) > \\\n len(str(self.number_of_dice) +\n str(self.size_of_dice) + 'D'):\n self.error = \" Incorrect modifier. 
Please use + or -\"\n return\n\n # Sets modifier to +0 if no +/- is entered.\n if self.modifier == '' or self.modifier is None:\n self.modifier = '+'\n self.modifier_number = '0'\n\n # Sets modifier to +0 if no number for it was entered\n if self.modifier_number == '' or self.modifier_number is None:\n self.modifier = '+'\n self.modifier_number = '0'\n\n # The full input of the user in 1 flag to print back to the user\n # at the end.\n self.input_last_roll = \\\n ' `Rolled ' + \\\n str(self.number_of_dice) + \\\n 'd' + \\\n str(self.size_of_dice) + \\\n str(self.modifier) + \\\n str(self.modifier_number) + \\\n ':` '\n\n if optional_input.lower() == 'adv':\n self.adv = 'adv'\n self.handle_adv()\n\n # Checks if user asked for disadvantage on a roll and hands it off\n elif optional_input.lower() == 'dadv':\n self.adv = 'dadv'\n self.handle_adv()\n\n # Checks if user asked for a sorted roll\n elif optional_input.lower() == 'sort':\n # Rolls the dice like normal but sorts the flag after.\n self.sort = True\n self.roll_dice(self.number_of_dice, self.size_of_dice)\n\n elif optional_input.lower() == 'hide':\n # Rolls the dice like normal but does not show the result in channel.\n self.hidden = True\n self.roll_dice(self.number_of_dice, self.size_of_dice)\n\n elif optional_input.lower() != '':\n self.error = str(optional_input) + \\\n \" is not a valid option. Please try (sort/adv/dadv/hide)\"\n\n else:\n # If everything passed the checks hand offs the proccesed input\n # to the randomizing and calculating functions.\n self.roll_dice(self.number_of_dice, self.size_of_dice)\n\n # Catches and attribute error on a wrong input and notifies the user.\n except AttributeError:\n self.error = \\\n \" Invalid input please follow this format (1)d20(+/-(5))\"\n except ValueError:\n self.error = \\\n \" Invalid input, please Make sure dice size is bigger than 0\"", "async def roll(self, ctx: commands.context, dice: str):\n\n if dice:\n lexer = DiceLexer()\n parser = DiceParser()\n\n try:\n step_data, result = parser.parse(lexer.tokenize(dice))\n except TypeError:\n await ctx.send(\"There was an error with your roll syntax. Please try again.\")\n return\n\n if result.is_integer():\n result = int(result)\n\n color = ConfiguredCog.convert_color(ConfiguredCog.config['content']['dice_result_embed_color'])\n title = f'Roll for {ctx.author.name}'\n description = f'**Result:**\\n' \\\n f'```\\n' \\\n f'{result}\\n' \\\n f'```\\n' \\\n f'**Steps:**\\n' \\\n f'```\\n'\n for step in step_data:\n description += step + '\\n'\n description += '```'\n\n embed = Embed(color=color, title=title, description=description)\n\n await ctx.send(embed=embed)", "def play(self):\n\n input(\"\"\"\nWelcome to Angry Dice! Roll the two dice until you get thru the 3 Stages!\nStage 1 you need to roll 1 & 2\nStage 2 you need to roll ANGRY & 4\nStage 3 you need to roll 5 & 6\nYou can lock a die needed for your current stage\nand just roll the other one, but beware!\nIf you ever get 2 ANGRY's at once, you have to restart to Stage 1!\nAlso, you can never lock a 6! That's cheating!\n\nTo rol the dice, simply input the name of the die you want to roll.\nTheir names are a and b.\n\nPress ENTER to start!\n \"\"\")\n self.cheating = self.roll_parse(\"ab\")\n done = False\n while not done:\n self.print_hand()\n decision = input(\"Roll dice: \")\n self.cheating = self.roll_parse(decision)\n done = self.advance_check()\n self.print_hand()\n print(\"You've won! 
Calm down!\")", "def roll_dice(num_rolls, dice=six_sided):\n # These assert statements ensure that num_rolls is a positive integer.\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\n assert num_rolls > 0, 'Must roll at least once.'\n # BEGIN PROBLEM 1\n \"*** YOUR CODE HERE ***\"\n count, return_sum = 0, 0\n while count < num_rolls:\n roll = dice()\n if roll == 1:\n count += 1\n while count < num_rolls:\n dice()\n count += 1\n return 1\n return_sum += roll\n count += 1\n return return_sum\n # END PROBLEM 1", "def roll_dice():\n return (random.randint(1, 6) + random.randint(1, 6))", "def die_roll():\n roll = random.randint(1,6)\n return roll", "async def roll(self, dice: str):\n try:\n rolls, limit = map(int, dice.split('d'))\n except Exception:\n await self.bot.say('Format has to be in NdN!')\n return\n\n result = ', '.join(str(random.randint(1, limit)) for r in range(rolls))\n await self.bot.say(result)", "def roll_dice(num_rolls, dice=six_sided):\n # These assert statements ensure that num_rolls is a positive integer.\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\n assert num_rolls > 0, 'Must roll at least once.'\n # BEGIN PROBLEM 1\n num_roll = 0\n sum = 0\n pig_out = False # Pig Out rule\n while num_roll < num_rolls:\n roll = dice()\n if roll == 1:\n pig_out = True\n sum += roll\n num_roll += 1\n if pig_out: return 1\n else: return sum\n # END PROBLEM 1", "def request_action(self):\n\n # Return the player's input\n return input(\"Enter 'r' to roll the die, or 'h' to hold. What you you like to do? \")", "async def roll(ctx, dice: str):\n try:\n rolls, limit = map(int, dice.split('d'))\n except Exception:\n await ctx.send('Format has to be in NdN!')\n return\n result = ', '.join(str(random.randint(1, limit)) for r in range(rolls))\n await ctx.send(result)" ]
[ "0.75145817", "0.73645425", "0.73168164", "0.71421844", "0.71233743", "0.7100476", "0.70834786", "0.7075699", "0.7033684", "0.70306945", "0.70136887", "0.7012352", "0.692733", "0.6923469", "0.68955934", "0.6895005", "0.6886274", "0.68699884", "0.6821573", "0.6815672", "0.6813377", "0.68031955", "0.6774458", "0.67636305", "0.6753952", "0.6720468", "0.6661369", "0.66070294", "0.6544697", "0.65426755" ]
0.75796336
0
Check the state of the game and, if the conditions are met, advance the player to the next stage.
def check_stage(self):

    # Initialize target and goal_stage to Stage 1 values
    target = 3
    goal_stage = 2

    # Set target and goal_stage if the current stage is not 1
    if self.current_stage == 2:
        target = 7
        goal_stage = 3
    elif self.current_stage == 3:
        target = 11
        goal_stage = 4

    # Check the stage goals
    if self.die_a.value + self.die_b.value == target and not self.cheating:
        self.current_stage = goal_stage
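The stage thresholds form a simple lookup; a table-driven sketch (an assumed equivalent, not the record's own code) makes the die-sum targets 3, 7, and 11 explicit and easy to test.

# stage -> (required die sum, stage to advance to)
STAGE_GOALS = {1: (3, 2), 2: (7, 3), 3: (11, 4)}

def next_stage(current_stage, die_sum, cheating):
    # Pure-function mirror of check_stage.
    target, goal = STAGE_GOALS.get(current_stage, (None, None))
    if not cheating and die_sum == target:
        return goal
    return current_stage

assert next_stage(1, 3, cheating=False) == 2   # 1 + 2 clears Stage 1
assert next_stage(2, 7, cheating=False) == 3   # ANGRY(3) + 4 clears Stage 2
assert next_stage(3, 11, cheating=True) == 3   # cheating blocks the win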
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def advance_check(self):\n values = [self.die_a.value, self.die_b.value]\n if self.stage == 3:\n if not self.cheating and \"5\" in values and \"6\" in values:\n return True\n if self.stage == 2 and \"ANGRY\" in values and \"4\" in values:\n self.stage = 3\n if self.stage == 1 and \"1\" in values and \"2\" in values:\n self.stage = 2\n if self.die_a.value == self.die_b.value == \"ANGRY\":\n print(\"WOW, you're ANGRY!\")\n self.stage = 1\n return False", "def playStage(startState, playerSkills, otherSkills):\n state = startState\n # This is where the flow of game will occur\n while (state):\n state.display()\n # Check whether the state is final or dead state.\n if state.isFinalState():\n return True\n elif state.isDeadState():\n return False\n\n # Updating skills if certain scenarios are reached.\n if state == crashState4:\n playerSkills.communication = True\n otherSkills.add(\"Communication\")\n elif state == crashState3:\n playerSkills.fishing = True\n otherSkills.add(\"Fishing\")\n elif state == crashState5:\n playerSkills.fishing = True\n otherSkills.add(\"Fishing\")\n \n transition = state.getTransitions()\n\n alphabets = state.getAlphabetToOption()\n if 'E' in transition: # Epsilon transition\n state = transition['E']\n else:\n # Make transition to another state based on user input.\n while True:\n userChoice = input(\"\\nEnter option A or B\\n\")\n if userChoice not in 'AB':\n print(\"Please enter a valid input\\n\")\n continue\n \n nextState = transition[alphabets[userChoice]]\n if (nextState.getSkillNeeded() == '') or (\n nextState.getSkillNeeded() in otherSkills):\n print(state.getSkillNeeded())\n state = transition[alphabets[userChoice]]\n break\n else:\n print(\n \"\\nSorry you don't have %s skill yet!\\n\\nChoose \"\n \"some other option\" %state.getSkillNeeded())", "def test_is_advancing_to_next_stage_yes(self):\n\n # test_input_cases =\n # [(die_a_value, die_b_value, stage, ok_output),]\n test_input_cases = [\n (\"1\", \"2\", 1, True),\n (\"2\", \"1\", 1, True),\n (\"ANGRY\", \"4\", 2, True),\n (\"4\", \"ANGRY\", 2, True),\n ]\n\n for test_io in test_input_cases:\n self._test_is_game_over(*test_io)", "def __advance(self):\n # If the game is being prepared.\n if self.__current_phase == self.PHASE_PREPARE:\n # If both players are ready.\n if self.__get_current_player().pre_game_prepare() and self.__get_other_player().pre_game_prepare():\n # Start the turn.\n self.__current_phase = self.PHASE_START_TURN\n\n # Begin the game for each player.\n self.__get_current_player().start_game()\n self.__get_other_player().start_game()\n\n # If the game is being set up.\n elif self.__current_phase == self.PHASE_START_TURN:\n # Advance onto the request fire phase.\n self.__current_phase = self.PHASE_REQUEST_SHOT\n\n # Call the start turn method for both players.\n self.__get_current_player().start_turn()\n self.__get_other_player().start_turn()\n\n # If the game requires the user to shoot.\n elif self.__current_phase == self.PHASE_REQUEST_SHOT:\n # Advance onto the fire phase.\n self.__current_phase = self.PHASE_FIRE\n\n # Call the shoot method of the user.\n self.__get_current_player().request_shot()\n\n # If the game requires the other user to be hit.\n elif self.__current_phase == self.PHASE_REQUEST_HIT:\n # Advance onto the hit phase.\n self.__current_phase = self.PHASE_HIT\n\n # Call the other player's request hit method.\n self.__get_other_player().request_hit(self.__current_fire_location)\n\n # If the game shows the hit result.\n elif self.__current_phase == self.PHASE_SHOW_HIT:\n # 
Advance onto the await phase.\n self.__current_phase = self.PHASE_AWAIT_OPPONENT_SHOT\n\n # Call the player's show hit method.\n self.__get_current_player().show_hit(self.__current_fire_location, self.__current_fire_effect)\n\n # If the game awaits the next shot.\n elif self.__current_phase == self.PHASE_AWAIT_OPPONENT_SHOT:\n # If the opponent has lost.\n if self.__current_fire_effect == Player.SHOT_HIT_TYPE_GAME_OVER:\n # Store the winner's index.\n engine.Engine.game_manager.winner = self.current_player_index\n # Move to the game over phase.\n engine.Engine.load_level(\"GameOver\")\n else:\n # Call the player's await hit method.\n self.__get_current_player().await_opponent_shot()\n\n # If the turn is over.\n if self.current_player_index == 1:\n # Advance to the next turn.\n self.__current_phase = self.PHASE_END_TURN\n else:\n # Advance onto the next fire phase.\n self.__current_phase = self.PHASE_REQUEST_SHOT\n # Increment the user counter.\n self.current_player_index = 1\n\n elif self.__current_phase == self.PHASE_END_TURN:\n # Start a new turn.\n self.__current_phase = self.PHASE_START_TURN\n # Decrement the user counter.\n self.current_player_index = 0\n\n # Call the end turn methods.\n self.__get_current_player().end_turn()\n self.__get_other_player().end_turn()", "def is_advancing_to_next_stage(self):\n if self.game_stage == 1:\n return (self.die_a.current_value == \"1\" and self.die_b.current_value == \"2\" or\n self.die_a.current_value == \"2\" and self.die_b.current_value == \"1\")\n if self.game_stage == 2:\n return (self.die_a.current_value == \"ANGRY\" and self.die_b.current_value == \"4\" or\n self.die_a.current_value == \"4\" and self.die_b.current_value == \"ANGRY\")\n if self.game_stage == 3:\n return False", "def play_game(self):\n # need everyone to pass to move to next phase?\n self.deal_cards()\n self.plant_food()", "def advance_stage(self):\n if self.stage == 0:\n self.curr_i = self.I\n elif self.stage == 1:\n self.curr_d = self.D\n elif self.stage == 2:\n self.curr_r == self.R", "def _active(self):\n self._soundhelper()\n k = self._game.getPlayerLives()\n self._movePaddle()\n self._game.moveBall(self._sound)\n if self._game.getPlayerLives() == 0:\n self._state = STATE_COMPLETE\n elif self._game.getPlayerLives() < k:\n self._state = STATE_PAUSED", "def check_angry(self):\n if self.die_a.value == 3 and self.die_b.value == 3:\n print(\"WOW, you're ANGRY!\\nTime to go back to Stage 1!\")\n self.current_stage = 1", "def step(self):\n\n data = (self._world, self._player)\n self._world.step(data)\n\n self.scroll()\n self.redraw()\n # check whether dead\n if self._player.get_health() == 0: # if died\n answer = messagebox.askyesno(\"GAME OVER\", \"Would you like to restart?\")\n if answer: # have a choice to reset the current level\n self.reset_world(self._filename)\n else: # or quit the game\n self.exit()\n # control the pause/restart\n if self._pause: # if is paused\n return # step will stop its process\n else: # not paused\n self._master.after(10, self.step) # go! 
next step", "def test_is_advancing_to_next_stage_no(self):\n\n # test_input_cases =\n # [(die_a_value, die_b_value, stage, ok_output),]\n test_input_cases = [\n (\"1\", \"2\", 2, False),\n (\"2\", \"1\", 3, False),\n (\"1\", \"1\", 1, False),\n (\"1\", \"1\", 2, False),\n (\"1\", \"1\", 3, False),\n (\"ANGRY\", \"1\", 1, False),\n (\"ANGRY\", \"1\", 2, False),\n ]\n\n for test_io in test_input_cases:\n self._test_is_game_over(*test_io)", "def step(self) -> int:\n # select the appropriate controller and player ID from the game state\n if self.move % 2 == 0:\n ctl = self.controllers[0]\n player = self.board.PLAYER_1\n else:\n ctl = self.controllers[1]\n player = self.board.PLAYER_2\n if self.won: # if the game was already completed return the appropriate status for the current player (WIN/LOSS)\n self.move += 1\n if player == self.won:\n return self.board.WIN\n return self.board.LOSS\n\n selected_move = ctl.play(player) # player selects the move\n\n status = self.board.play(selected_move, player) # play the selected move\n if status == self.board.INVALID_MOVE:\n return status\n if status == self.board.WIN:\n self.won = player\n self.move += 1\n return status", "def start_game(self):\n p1_move = True\n is_all_moves_over = False\n while not is_all_moves_over:\n\n while p1_move and not is_all_moves_over:\n p1 = int(input(\"Player 1 pos:\"))\n is_all_moves_over, p1_move = self.play('p1', p1, p1_move)\n\n while not p1_move and not is_all_moves_over:\n p2 = int(input(\"Player 2 pos:\"))\n is_all_moves_over, p1_move = self.play('p2', p2, p1_move)\n\n print(\"Game Ended in Draw\")", "def step(self):\n self.game.step()", "def change_player_state(self):\n if self.active_player.get() is True:\n # Get game phase and unlock respective buttons?\n # or should game do that\n pass\n else:\n pass\n #self.disable_all_buttons()", "def _run_state(self):\n main_log.debug(\"Running state \" + self.state)\n\n if not self.get_state_info(\"condition\"):\n self._run_next_state()\n return\n\n try:\n self._pre()\n except StateSwitchException as e:\n self.state = e.next_state\n self._run_state()\n return\n\n if self.get_state_info(\"wake_players\"):\n self._waiting_for_players = True\n self._wake_players()\n else:\n self._players_are_done()", "def play(self):\n for step_i in range(self.max_step):\n player_id = step_i & 1\n player = self.players[player_id]\n action = player.nxt_move()\n if isinstance(player, mcts.MCTSPlayer) and player.value_net.loggable:\n print(f'Player{player_id}: Action: {action}')\n if not self.is_valid_action(action):\n # because now just consider 2 players\n print(f\"Player: {player_id}, Action: {action} Did Not choose a valid action!\")\n self.board[action // self.w][action % self.w] = player_id\n self.winner = 1 - player_id\n else:\n self.board[action // self.w][action % self.w] = player_id\n self.winner = self.k0()\n self.players[1 - player_id].other_nxt_move(action)\n if self.winner != -1:\n break\n print(f'Winner: {self.winner}')\n for player_id in range(len(self.players)):\n self.players[player_id].game_ended()", "def player_stage(niv): \n playing = True\n a = niv[0][0] \n b = niv[0][1] \n (x, y) = (a, b) \n state = [[a, b]] #Create a list with the starting point of the selected level patern.\n sense.stick.get_events()\n while playing:\n for event in sense.stick.get_events(): #It moves the pixel with the player moves and add the point passed by the player in the state[].\n if event.action == 'pressed':\n if event.direction == 'left':\n if x > 0:\n x = min(x-1, 7)\n state.append([x, y])\n elif 
event.direction == 'right':\n if x < 7:\n x = max(x+1, 0)\n state.append([x, y])\n if event.direction == 'down':\n if y < 7:\n y = min(y+1, 7)\n state.append([x, y])\n elif event.direction == 'up':\n if y > 0:\n y = max(y-1, 0)\n state.append([x, y])\n elif event.direction == 'middle':\n playing = False\n sense.set_pixel(x, y, RED)\n if state[:] == niv[:]: #Compare the way choosen by the player with the selected level patern. Results of the try.\n sense.show_message(\"WINNER !\",\n text_colour=LEMON, scroll_speed=0.05)\n sleep(2)\n main() #brings back to the level selection.\n else:\n sense.show_message(\"LOSER !\",\n text_colour=BLUE, scroll_speed=0.05)\n sleep(2)\n try_again(niv) #cf. try_again() function", "def play_game(self):\n self.welcome()\n while (self.winner is None) and (not self.exit_flag) and (not self.board.full()):\n self.play_round()\n self.exit_game()", "def update(self):\r\n if not self.tr.game_over and self.tr.turn_tracker:\r\n self.computer_play()", "def play_game(game, *players):\n state = game.initial\n while True:\n for player in players:\n print \"now move for player \", player\n move = player(game, state) # update move\n state = game.make_move(move, state) # update game state\n print '---'\n game.display(state) # display board\n print '---'\n \n if move == None or game.terminal_test(state): #check game end\n if game.utility(state,'X')==1:\n print 'X has won!'\n elif game.utility(state,'O')==1:\n print 'O has won!'\n else:\n print 'Its A Draw!'\n return #exit", "def main(self):\n _age = info.getInfo(self)\n _flag = game.check_age(self, _age)\n if _flag == False:\n exit()\n game.wants_to_play(0)", "def check_game(self):\n gameOver = None\n if self.turn > 4:\n gameOver = self.check_x_won()\n if gameOver is True:\n self.game_x_won()\n return\n\n gameOver = None\n if self.turn > 5:\n gameOver = self.check_o_won()\n if gameOver is True:\n self.game_o_won()\n return\n\n if self.turn >= 9:\n self.game_tie()\n return", "def play_game(self, early_finish=False):\n automated = self.game_type == self.game_types['vs_ai']\n while not (self.check_for_end_of_game() or (early_finish and self.check_for_early_finish())):\n self.play_single_turn()\n if automated:\n input('Press any key to continue.\\n')\n\n if self.player_1.score > self.player_2.score:\n print(\"player 1 wins!\" if automated else \"you win!\")\n elif self.player_2.score > self.player_1.score:\n print(\"player 2 wins!\" if automated else \"you lose!\")\n else:\n print(\"it was a tie!\")\n self.game_board.graphical_output(block=True)", "def go_to_next_state(self):\n pass", "def play_self_play_game(self):\n\n # start a fresh game \n self.reset_game()\n \n # play the epsilon greedy move and save the state transition in the experience lists \n while not self.game_terminal():\n self.epsilon_greedy_move()", "def _advance_to_running(self):\n if all(character.status == Character.FINISHED for character in self.characters.filter(Q(owner__in=self.get_players()))):\n self.status = Game.RUNNING\n self.save()\n else:\n raise ValidationError('All player characters must be approved before continuing.')", "def next_round(self):\n if self.finish_game == 3:\n self.restart_game()\n return\n\n atual_color = self.atual_player.color\n if self.board.valid_moves(atual_color).__len__() > 0:\n self.board.play(self.atual_player.play(self.board.get_clone()), atual_color)\n self.view.atualizar_discos()\n self.finish_game = 0\n else:\n self.finish_game += 1\n self.atual_player = self._opponent(self.atual_player)\n\n 
self.view.atualizar_jogador_atual(self.atual_player.color)\n\n if self.finish_game == 2:\n self._end_game()", "def solveOneStep(self):\n ### Student code goes here\n\n if self.currentState.state == self.victoryCondition:\n return True\n\n current_move = False\n current_depth = self.currentState.depth + 1\n list_movables = self.gm.getMovables()\n\n while not current_move:\n count = self.currentState.nextChildToVisit\n if len(list_movables) <= count:\n if not self.currentState.parent:\n return False\n else:\n self.gm.reverseMove(self.currentState.requiredMovable)\n list_movables = self.gm.getMovables()\n self.currentState = self.currentState.parent\n current_depth = self.currentState.depth + 1\n continue\n\n next_move = list_movables[count]\n self.gm.makeMove(next_move)\n new_game_state = GameState(self.gm.getGameState(), current_depth, next_move)\n if new_game_state in self.visited:\n self.currentState.nextChildToVisit += 1\n self.gm.reverseMove(next_move)\n else:\n self.currentState.nextChildToVisit += 1\n new_game_state.parent = self.currentState\n self.currentState.children.append(new_game_state)\n self.currentState = new_game_state\n current_move = next_move\n\n if self.currentState.state != self.victoryCondition:\n self.visited[self.currentState] = True\n return False\n else:\n return True", "def start_game_check(self):\n if len(self.pending_players) > 0:\n return False\n else:\n return True" ]
[ "0.6904773", "0.680145", "0.6782129", "0.6746176", "0.672846", "0.67035085", "0.6647363", "0.66338056", "0.660885", "0.65491366", "0.6462665", "0.6453733", "0.6429156", "0.6387714", "0.63038003", "0.62731713", "0.62604964", "0.6260225", "0.621723", "0.62048525", "0.6200238", "0.6197657", "0.6197143", "0.61751944", "0.61643964", "0.61499935", "0.61339843", "0.6128897", "0.61035657", "0.6098936" ]
0.7219316
0
Checks to see if both dice are Angry; if so, sets current_stage to 1.
def check_angry(self):
    if self.die_a.value == 3 and self.die_b.value == 3:
        print("WOW, you're ANGRY!\nTime to go back to Stage 1!")
        self.current_stage = 1
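A tiny runnable check (the Die stand-in is assumed) confirming the double-ANGRY penalty: face value 3 encodes the ANGRY face, and rolling two of them resets progress to Stage 1.

class Die:
    # Hypothetical stand-in; face value 3 encodes ANGRY.
    def __init__(self, value):
        self.value = value

class Game:
    def __init__(self):
        self.die_a = Die(3)
        self.die_b = Die(3)
        self.current_stage = 3

    # The method from the record, unchanged.
    def check_angry(self):
        if self.die_a.value == 3 and self.die_b.value == 3:
            print("WOW, you're ANGRY!\nTime to go back to Stage 1!")
            self.current_stage = 1

game = Game()
game.check_angry()
assert game.current_stage == 1  # double ANGRY wipes out all progress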
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_angry_dice(self):\n if self.die_a.current_value == \"ANGRY\" and self.die_b.current_value == \"ANGRY\":\n print(\"WOW, you're ANGRY!\\nTime to go back to Stage 1!\")\n self.game_stage = 1", "def check_stage(self):\n\n #Initalize target and goal_stage to stage1 values\n target = 3\n goal_stage = 2\n\n # Set target and goal_stage if current stage is not 1\n if self.current_stage == 2:\n target = 7\n goal_stage = 3\n elif self.current_stage == 3:\n target = 11\n goal_stage = 4\n\n # Check the stage goals\n if self.die_a.value + self.die_b.value == target and not self.cheating:\n self.current_stage = goal_stage", "def advance_check(self):\n values = [self.die_a.value, self.die_b.value]\n if self.stage == 3:\n if not self.cheating and \"5\" in values and \"6\" in values:\n return True\n if self.stage == 2 and \"ANGRY\" in values and \"4\" in values:\n self.stage = 3\n if self.stage == 1 and \"1\" in values and \"2\" in values:\n self.stage = 2\n if self.die_a.value == self.die_b.value == \"ANGRY\":\n print(\"WOW, you're ANGRY!\")\n self.stage = 1\n return False", "def is_advancing_to_next_stage(self):\n if self.game_stage == 1:\n return (self.die_a.current_value == \"1\" and self.die_b.current_value == \"2\" or\n self.die_a.current_value == \"2\" and self.die_b.current_value == \"1\")\n if self.game_stage == 2:\n return (self.die_a.current_value == \"ANGRY\" and self.die_b.current_value == \"4\" or\n self.die_a.current_value == \"4\" and self.die_b.current_value == \"ANGRY\")\n if self.game_stage == 3:\n return False", "def test_is_advancing_to_next_stage_yes(self):\n\n # test_input_cases =\n # [(die_a_value, die_b_value, stage, ok_output),]\n test_input_cases = [\n (\"1\", \"2\", 1, True),\n (\"2\", \"1\", 1, True),\n (\"ANGRY\", \"4\", 2, True),\n (\"4\", \"ANGRY\", 2, True),\n ]\n\n for test_io in test_input_cases:\n self._test_is_game_over(*test_io)", "def _test_is_game_over(self, die_a_value, die_b_value, stage, ok_output):\n\n self.new_angry_dice.die_a.current_value = die_a_value\n self.new_angry_dice.die_b.current_value = die_b_value\n self.new_angry_dice.game_stage = stage\n\n self.assertEqual(self.new_angry_dice.is_advancing_to_next_stage(),\n ok_output,\n \"Incorrect output for die_a:{}, \"\n \"die_b:{} in stage:{}.\"\n .format(die_a_value, die_b_value, stage))", "def check_cheating(self, dice=[]):\n\n #Assume they're not cheating until proven guilty\n self.cheating = False\n\n if self.current_stage == 3:\n if self.die_a not in dice and (self.die_a.value == 6):\n print(\"You're cheating! You cannot lock a 6! You cannot win \"\n \"until you reroll it!\")\n self.cheating = True\n elif self.die_b not in dice and (self.die_b.value == 6):\n print(\"You're cheating! You cannot lock a 6! 
You cannot win \"\n \"until you reroll it!\")\n self.cheating = True", "def main(self):\n self.display_welcome_message()\n print(self.die_a.__dict__)\n # Once the user hits ENTER the game starts.\n input(\"Press ENTER to start!\")\n # Roll both dice at the beginning of the game\n self.roll_dice([\"a\", \"b\"])\n # If player rolled two \"ANGRY\" dice, display 'you are angry'\n # message and reset stage value.\n self.handle_angry_dice()\n # Advance to Stage 2 if rolled 1 and 2\n if self.is_advancing_to_next_stage():\n self.game_stage += 1\n # Display current stage\n print(\"You are in Stage {}\".format(self.game_stage))\n # Loop on sequence below until user wins\n while not self.game_won:\n # Get user to select dice to roll.\n dice_to_roll = self.process_user_input()\n # Handle cheating\n self.register_player_cheating(self.die_a, dice_to_roll)\n self.register_player_cheating(self.die_b, dice_to_roll)\n # Display message to indicate if the player tried to cheat\n if self.just_cheated_a and self.just_cheated_b:\n print(\"You're cheating! You cannot win until you reroll both \"\n \"dice!\")\n elif self.just_cheated_a or self.just_cheated_b:\n print(\"You're cheating! You cannot lock a 6! You cannot win \"\n \"until you reroll it!\")\n # If user attempts to illegally hold die 'a' in current stage ...\n if \"a\" not in dice_to_roll and self.is_die_held_in_wrong_stage(self.die_a):\n # Force rolling die 'a'\n dice_to_roll.append(\"a\")\n # If user attempts to illegally hold die 'b' in current stage ...\n if \"b\" not in dice_to_roll and self.is_die_held_in_wrong_stage(self.die_b):\n # Force rolling die 'b'\n dice_to_roll.append(\"b\")\n # Roll dice.\n self.roll_dice(dice_to_roll)\n # If player rolled two \"ANGRY\" dice, display \"you are angry\"\n # message and reset stage value.\n self.handle_angry_dice()\n # If player won, update game_won attribute break out of this loop.\n if self.is_game_over():\n self.game_won = True\n break\n # If player advances to next stage, increment game_stage attribute\n elif self.is_advancing_to_next_stage():\n self.game_stage += 1\n # Display current stage\n print(\"You are in Stage {}\".format(self.game_stage))\n # Player won! Display victory message!\n print(\"You've won! Calm down!\")", "def test_is_advancing_to_next_stage_no(self):\n\n # test_input_cases =\n # [(die_a_value, die_b_value, stage, ok_output),]\n test_input_cases = [\n (\"1\", \"2\", 2, False),\n (\"2\", \"1\", 3, False),\n (\"1\", \"1\", 1, False),\n (\"1\", \"1\", 2, False),\n (\"1\", \"1\", 3, False),\n (\"ANGRY\", \"1\", 1, False),\n (\"ANGRY\", \"1\", 2, False),\n ]\n\n for test_io in test_input_cases:\n self._test_is_game_over(*test_io)", "def is_game_over(self):\n if self.just_cheated_a or self.just_cheated_b:\n return False\n if self.game_stage == 3:\n return (self.die_a.current_value == \"5\" and self.die_b.current_value == \"6\" or\n self.die_a.current_value == \"6\" and self.die_b.current_value == \"5\")\n else:\n return False", "def play(self):\n\n input(\"\"\"\nWelcome to Angry Dice! Roll the two dice until you get thru the 3 Stages!\nStage 1 you need to roll 1 & 2\nStage 2 you need to roll ANGRY & 4\nStage 3 you need to roll 5 & 6\nYou can lock a die needed for your current stage\nand just roll the other one, but beware!\nIf you ever get 2 ANGRY's at once, you have to restart to Stage 1!\nAlso, you can never lock a 6! 
That's cheating!\n\nTo rol the dice, simply input the name of the die you want to roll.\nTheir names are a and b.\n\nPress ENTER to start!\n \"\"\")\n self.cheating = self.roll_parse(\"ab\")\n done = False\n while not done:\n self.print_hand()\n decision = input(\"Roll dice: \")\n self.cheating = self.roll_parse(decision)\n done = self.advance_check()\n self.print_hand()\n print(\"You've won! Calm down!\")", "def is_goal(self):\n if self.team1.get_cur_hp() == 0:\n return 1\n elif self.team2.get_cur_hp() == 0:\n return -1\n else:\n return 0", "def main(self):\n\n text = \"Welcome to Angry Dice! Roll the two dice until you get thru the 3 Stages!\\n\" \\\n \"Stage 1 you need to roll 1 & 2\\n\" \\\n \"Stage 2 you need to roll ANGRY & 4\\n\" \\\n \"Stage 3 you need to roll 5 & 6\\n\" \\\n \"You can lock a die needed for your current stage \\n\" \\\n \"and just roll the other one, but beware!\\n\" \\\n \"If you ever get 2 ANGRY's at once, you have to restart to Stage 1!\\n\" \\\n \"Also, you can never lock a 6! That's cheating!\\n\\n\" \\\n \"To roll the dice, simply input the name of the die you want to roll.\\n\" \\\n \"Their names are a and b.\\n\"\n print(text)\n input(\"Press ENTER to start!\")\n\n # Check to see if we advance in stage\n self.check_stage()\n\n #Show inital state of the game\n self.print_dice()\n\n while self.current_stage != 4:\n # Prompt user what to reroll\n dice = self.determine_roll()\n\n # Check for cheating\n self.check_cheating(dice)\n\n # Roll the dice\n self.roll_the_dice(dice)\n\n # Check for ANGRY\n self.check_angry()\n\n # Check to see if we advance in stage\n self.check_stage()\n\n # Print the dice\n self.print_dice()\n\n\n # Congratulate them on winning\n print(\"You've won! Calm down!\")", "def hit(self):\r\n self.life -= 1\r\n total_score = 1\r\n if self.life == 0:\r\n self.alive = False\r\n total_score += self.bonus\r\n return total_score", "def advance_stage(self):\n if self.stage == 0:\n self.curr_i = self.I\n elif self.stage == 1:\n self.curr_d = self.D\n elif self.stage == 2:\n self.curr_r == self.R", "def print_dice(self):\n\n stage_to_print = 3 if self.current_stage == 4 else self.current_stage\n print(\"You rolled:\\n a = [ {} ]\\n b = [ {} ]\\n\\nYou are in Stage {}\"\n .format(self.die_a, self.die_b, stage_to_print))", "def env_step(self, action):\n if action == 0: # Hit\n\n new_state = deepcopy(self.current_state)\n reward = 0\n terminal = False\n \n new_card = min(self.random.randint(1,14), 10)\n # print('new card:', new_card)\n \n if new_card == 1:\n self.player_ace_count += 1\n new_state['player_sum'] = self.current_state['player_sum'] + 11 \n else:\n new_state['player_sum'] = self.current_state['player_sum'] + new_card\n\n while new_state['player_sum'] > 21 and self.player_ace_count > 0:\n self.player_ace_count -= 1\n new_state['player_sum'] -= 10\n\n new_state['usable_ace'] = int(self.player_ace_count > 0)\n\n if new_state['player_sum'] > 21: # Goes bust\n reward = -1\n terminal = True\n\n elif action == 1: # Stick\n\n new_state = deepcopy(self.current_state)\n terminal = True\n\n if self.current_state['dealer_card'] == 1:\n dealer_ace = 1\n dealer_sum = 11\n else:\n dealer_ace = 0\n dealer_sum = self.current_state['dealer_card']\n\n first_two_cards = True\n while dealer_sum < self.dealer_sticks or first_two_cards:\n first_two_cards = False\n # new_card = self.random.choice(range(1,11), p=self.card_probs)\n new_card = min(self.random.randint(1,14), 10)\n if new_card == 1:\n dealer_sum += 11\n dealer_ace += 1\n else:\n dealer_sum += 
new_card\n\n while dealer_sum > 21 and dealer_ace > 0:\n dealer_sum -= 10\n dealer_ace -= 1\n dealer_ace = int(dealer_ace > 0)\n # print('dealer:', new_card)\n\n # print('dealer sum:', dealer_sum)\n if dealer_sum > 21:\n reward = 1\n else:\n if new_state['player_sum'] > dealer_sum:\n reward = 1\n elif new_state['player_sum'] < dealer_sum:\n reward = -1\n else:\n reward = 0\n # reward = int(new_state['player_sum'] > dealer_sum) - int(new_state['player_sum'] < dealer_sum)\n\n else:\n raise Exception(\"Invalid action.\")\n\n self.current_state = new_state\n\n self.reward_obs_term = (reward, self.observation(self.current_state), terminal)\n\n return self.reward_obs_term", "def handle_adv(self):\n\n # This part handles advantage so it takes the highest of the 2 numbers\n # and then drops the lowest number\n if self.adv == 'adv':\n # Checks wether the number that was input is not 1 or 2.\n if str(self.number_of_dice) != '2' and \\\n str(self.number_of_dice) != '1':\n self.error = 'Can only roll advantage with 2 dice, ya dummy!'\n\n # Checks if number of dice was left blank so automatically set to 1\n if str(self.number_of_dice) != '2' and \\\n str(self.number_of_dice) == '1':\n self.number_of_dice = 2\n\n # Checks if the number of dice is 2 before moving on\n if str(self.number_of_dice) == '2':\n self.sort = True\n self.roll_dice(self.number_of_dice, self.size_of_dice)\n # Stores the dropped roll before deleting it from last_roll\n self.dropped_roll = self.last_roll[0]\n del self.last_roll[0]\n # Returns flag to default state\n self.adv = False\n\n # This part handles disadvantage so it takes the lowest of the 2\n # numbers and then drops the highest number\n if self.adv == 'dadv':\n # Checks wether the number that was input is not 1 or 2.\n if str(self.number_of_dice) != '2' and \\\n str(self.number_of_dice) != '1':\n self.error = \\\n 'Can only roll disadvantage with 2 dice, ya dummy!'\n\n # Checks if number of dice was left blank so automatically set to 1\n if str(self.number_of_dice) != '2' and \\\n str(self.number_of_dice) == '1':\n self.number_of_dice = 2\n\n # Checks if the number of dice is 2 before moving on\n if str(self.number_of_dice) == '2':\n self.sort = True\n self.roll_dice(self.number_of_dice, self.size_of_dice)\n # Stores the dropped roll before deleting it from last_roll\n self.dropped_roll = self.last_roll[1]\n del self.last_roll[1]\n # Returns flag to default state\n self.adv = False", "def shallIAssault(self, enemyShip):\n if (self.isAssault == 1 and enemyShip.isAssault == 0 and \n self.underAssault == 0 and enemyShip.underAssault == 0 and\n self.currentTarget == enemyShip):\n self.myGalaxy.setupAssaultBattle(self, enemyShip)", "def account_for_new_score(self):\n self.rolls += 1\n if self.continued is True:\n self.total_score += self.current_roll.score\n self.dice_remaining = self.current_roll.dice_remaining\n\n if self.dice_remaining == 0:\n self.resets += 1\n self.dice_remaining = 5", "def player_stand(self):\r\n if self.in_progress:\r\n while self.dealer_hand.total < 17:\r\n self.dealer_hand.add(self.deck.deal())\r\n if self.dealer_hand.total > 21 or self.dealer_hand.total < self.player_hand.total:\r\n self.status_color = 'red'\r\n self.game_status = \"Player WINS... Press 'r' to start game\"\r\n self.player_wins += 1\r\n elif self.player_hand.total == self.dealer_hand.total:\r\n self.status_color = 'red'\r\n self.game_status = \"TIE Game... Press 'r' to start game\"\r\n else:\r\n self.status_color = 'red'\r\n self.game_status = \"Dealer WINS... 
Press 'r' to start game\"\r\n self.dealer_wins += 1\r\n self.in_progress = False\r\n self.refresh_canvas()", "def check_score(self) -> None:\n self.player_1, self.player_2 = 0, 0\n for cell in self.cells:\n if cell.player == 1:\n self.player_1 += 1\n elif cell.player == 2:\n self.player_2 += 1", "def game(a,b, ):\n attacker, defender = a, b\n combatround = 0\n while a.hitpoints > 0 and b.hitpoints > 0:\n combatround += 1 # increase combatround by 1\n if a.stunned > 0:\n a.stunned -= 1\n if b.stunned > 0:\n b.stunned -= 1\n print()\n print(\"=================================\")\n print(\"combat round nr:\", combatround)\n print(\"attacker:\", attacker)\n print(\"defender:\", defender)\n print(\"=================================\")\n result = strike(attacker,defender)\n if result == None:\n break\n for line in result:\n print(line)\n if attacker == a and defender ==b:\n attacker, defender = b, a\n else:\n attacker, defender = a, b\n\n # game over \n print(\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\")\n if a.hitpoints > b.hitpoints:\n victor = a.name\n elif b.hitpoints > a.hitpoints :\n victor = b.name\n else:\n print(\"it is a draw\")\n victor = None\n print(\"victor:\", victor)", "def checkForPickup(self):\n if self.counter == 0:\n if self.game.player.reticule in self.overlapping_sprites and (games.keyboard.is_pressed(games.K_a) \\\n or games.keyboard.is_pressed(games.K_d)):\n self.counter = 15\n if self.held == 0:\n self.game.player.held_item = self\n self.held = 1\n self.y = self.game.player.y\n else:\n self.game.player.held_item = None\n self.held = 0", "def play_pig(A, B, dice=dice):\n state = (0, 0, 0, 0) # initial state\n player = (A, B)\n while True:\n (p, me, you, pending) = state\n if me >= goal:\n return player[p]\n elif you >= goal:\n return player[other[p]]\n decision = player[p](state) # get decision for player p\n if decision is 'roll':\n state = roll(state, next(dice))\n else:\n state = hold(state)", "def punched(self):\n if not self.dizzy:\n self.dizzy = 1\n self.original = self.image\n Chimp.count_punch += 1", "def game_over(self):\n red_minion = 0\n blue_minion = 0\n red_master = 0\n blue_master = 0\n only_masters = True\n for row in self.board:\n for piece in row:\n if piece != 0:\n if not piece.master:\n if piece.player:\n blue_minion += 1\n else:\n red_minion += 1\n only_masters = False\n else:\n if piece.player:\n blue_master += 1\n else:\n red_master += 1\n if blue_minion + blue_master == 0:\n self.winner = \"Red\"\n self.red_victories += 1\n self.number_of_games +=1\n self.game_over_screen()\n return True\n elif red_minion + red_master == 0:\n self.winner = \"Blue\"\n self.blue_victories += 1\n self.number_of_games +=1\n self.game_over_screen()\n return True\n elif only_masters:\n if red_master > blue_master:\n self.winner = \"Red\"\n self.red_victories += 1\n elif blue_master > red_master:\n self.winner = \"Blue\"\n self.blue_victories += 1\n else:\n self.winner = \"Nobody\"\n self.number_of_games +=1\n self.game_over_screen()\n return True\n \n return False", "def calculate_advantage(stage_0, stage_1):\n # Improvement in hp difference is good.\n hp_pct_0 = (float(stage_0.friendly_life)/MAX_FRIENDLY_LIFE) - (float(stage_0.enemy_life)/MAX_ENEMY_LIFE)\n hp_pct_1 = (float(stage_1.friendly_life)/MAX_FRIENDLY_LIFE) - (float(stage_1.enemy_life)/MAX_ENEMY_LIFE)\n return hp_pct_1 - hp_pct_0", "def roll(self):\n self._rollCount += 1\n self._die1.roll()\n self._die2.roll()\n (v1, v2) = (self._die1.getValue(),\n self._die2.getValue())\n self._lastRoll = (v1, v2)\n if 
self._initialSum == 0:\n initialSum = v1 + v2\n if initialSum in (2, 3, 12):\n return \"LOSE\"\n elif initialSum in (7, 11):\n return \"WIN\"\n else:\n return \"CONTINUE\"\n else:\n sum = v1 + v2\n if sum == 7:\n return \"LOSE\"\n elif sum == initialSum:\n return \"WIN\"\n else:\n return \"CONTINUE\"", "def player_win(self):\n global chips\n global placed_bet\n\n chips = (self.final_bet*2 + chips)\n self.victory = True\n placed_bet = False" ]
[ "0.7947746", "0.68952584", "0.67912954", "0.66577834", "0.657209", "0.65228903", "0.6070311", "0.6039068", "0.5986948", "0.58281", "0.57704735", "0.5715772", "0.5712143", "0.5680398", "0.56088614", "0.5588602", "0.55773765", "0.5547491", "0.55223674", "0.5517609", "0.5482465", "0.54440886", "0.54250765", "0.542501", "0.5411246", "0.54040784", "0.53986", "0.53901523", "0.53804255", "0.53694487" ]
0.74593896
1
In Stage 3, the player can only hold a 5-valued die. If they hold a 6, they'll be found cheating and thus cannot win or advance to the next stage.
def check_cheating(self, dice=[]):
    # Assume they're not cheating until proven guilty
    self.cheating = False

    if self.current_stage == 3:
        if self.die_a not in dice and (self.die_a.value == 6):
            print("You're cheating! You cannot lock a 6! You cannot win "
                  "until you reroll it!")
            self.cheating = True
        elif self.die_b not in dice and (self.die_b.value == 6):
            print("You're cheating! You cannot lock a 6! You cannot win "
                  "until you reroll it!")
            self.cheating = True
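Illustrative only: a sketch of the hold/reroll semantics the method above assumes - the dice list holds the dice being rerolled, so a 6-valued die left out of it counts as an illegal lock in Stage 3. Die and Game are hypothetical stubs, not names from the record.

class Die:
    def __init__(self, value):
        self.value = value

class Game:
    def __init__(self):
        self.die_a = Die(6)       # the player tries to lock this 6
        self.die_b = Die(5)
        self.current_stage = 3
        self.cheating = False

    def check_cheating(self, dice=[]):
        self.cheating = False
        if self.current_stage == 3:
            if self.die_a not in dice and self.die_a.value == 6:
                print("You're cheating! You cannot lock a 6!")
                self.cheating = True
            elif self.die_b not in dice and self.die_b.value == 6:
                print("You're cheating! You cannot lock a 6!")
                self.cheating = True

game = Game()
game.check_cheating(dice=[game.die_b])   # only die_b is rerolled; die_a (a 6) is held
assert game.cheating is True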
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def advance_check(self):\n values = [self.die_a.value, self.die_b.value]\n if self.stage == 3:\n if not self.cheating and \"5\" in values and \"6\" in values:\n return True\n if self.stage == 2 and \"ANGRY\" in values and \"4\" in values:\n self.stage = 3\n if self.stage == 1 and \"1\" in values and \"2\" in values:\n self.stage = 2\n if self.die_a.value == self.die_b.value == \"ANGRY\":\n print(\"WOW, you're ANGRY!\")\n self.stage = 1\n return False", "def sixteen_is_dead(players):\n \n number = setup_number_of_dices()\n faces = setup_number_of_faces()\n result_list = []\n for player in range(1, players+1):\n total_points = 0\n while total_points < 16:\n user_input = user_interface(player)\n if user_input == \"\":\n while True:\n user_input_2 = user_interface_2()\n if user_input_2 == \"\":\n dice_number = roll_dice(number,faces)\n total_points += dice_number\n print()\n print (\"Deine aktuelle Punktzahl beträgt:\",total_points)\n print()\n if total_points == 10:\n time.sleep(3)\n continue\n else:\n break\n else:\n dice_number = roll_cheating_dice(number,faces)\n total_points += dice_number\n print()\n print (\"Deine aktuelle Punktzahl beträgt:\",total_points)\n print()\n if total_points == 10:\n time.sleep(3)\n continue\n else:\n break\n if (total_points >= 16) or (total_points == 9) or (user_input == \"n\"):\n print()\n break\n if total_points < 16:\n result_list.append(total_points)\n else:\n print()\n break\n if total_points >= 16:\n print(\"Spieler\",player,\"hat das Spiel verloren!\")\n print()\n restart()\n else:\n player = 1\n for i in result_list:\n if i == min(result_list):\n print(\"Spieler\",player,\"hat das Spiel mit\",i,\"Punkten verloren!\")\n player += 1\n print()\n restart()", "def check_stage(self):\n\n #Initalize target and goal_stage to stage1 values\n target = 3\n goal_stage = 2\n\n # Set target and goal_stage if current stage is not 1\n if self.current_stage == 2:\n target = 7\n goal_stage = 3\n elif self.current_stage == 3:\n target = 11\n goal_stage = 4\n\n # Check the stage goals\n if self.die_a.value + self.die_b.value == target and not self.cheating:\n self.current_stage = goal_stage", "def check_angry(self):\n if self.die_a.value == 3 and self.die_b.value == 3:\n print(\"WOW, you're ANGRY!\\nTime to go back to Stage 1!\")\n self.current_stage = 1", "def main(self):\n self.display_welcome_message()\n print(self.die_a.__dict__)\n # Once the user hits ENTER the game starts.\n input(\"Press ENTER to start!\")\n # Roll both dice at the beginning of the game\n self.roll_dice([\"a\", \"b\"])\n # If player rolled two \"ANGRY\" dice, display 'you are angry'\n # message and reset stage value.\n self.handle_angry_dice()\n # Advance to Stage 2 if rolled 1 and 2\n if self.is_advancing_to_next_stage():\n self.game_stage += 1\n # Display current stage\n print(\"You are in Stage {}\".format(self.game_stage))\n # Loop on sequence below until user wins\n while not self.game_won:\n # Get user to select dice to roll.\n dice_to_roll = self.process_user_input()\n # Handle cheating\n self.register_player_cheating(self.die_a, dice_to_roll)\n self.register_player_cheating(self.die_b, dice_to_roll)\n # Display message to indicate if the player tried to cheat\n if self.just_cheated_a and self.just_cheated_b:\n print(\"You're cheating! You cannot win until you reroll both \"\n \"dice!\")\n elif self.just_cheated_a or self.just_cheated_b:\n print(\"You're cheating! You cannot lock a 6! 
You cannot win \"\n \"until you reroll it!\")\n # If user attempts to illegally hold die 'a' in current stage ...\n if \"a\" not in dice_to_roll and self.is_die_held_in_wrong_stage(self.die_a):\n # Force rolling die 'a'\n dice_to_roll.append(\"a\")\n # If user attempts to illegally hold die 'b' in current stage ...\n if \"b\" not in dice_to_roll and self.is_die_held_in_wrong_stage(self.die_b):\n # Force rolling die 'b'\n dice_to_roll.append(\"b\")\n # Roll dice.\n self.roll_dice(dice_to_roll)\n # If player rolled two \"ANGRY\" dice, display \"you are angry\"\n # message and reset stage value.\n self.handle_angry_dice()\n # If player won, update game_won attribute break out of this loop.\n if self.is_game_over():\n self.game_won = True\n break\n # If player advances to next stage, increment game_stage attribute\n elif self.is_advancing_to_next_stage():\n self.game_stage += 1\n # Display current stage\n print(\"You are in Stage {}\".format(self.game_stage))\n # Player won! Display victory message!\n print(\"You've won! Calm down!\")", "def test_is_advancing_to_next_stage_no(self):\n\n # test_input_cases =\n # [(die_a_value, die_b_value, stage, ok_output),]\n test_input_cases = [\n (\"1\", \"2\", 2, False),\n (\"2\", \"1\", 3, False),\n (\"1\", \"1\", 1, False),\n (\"1\", \"1\", 2, False),\n (\"1\", \"1\", 3, False),\n (\"ANGRY\", \"1\", 1, False),\n (\"ANGRY\", \"1\", 2, False),\n ]\n\n for test_io in test_input_cases:\n self._test_is_game_over(*test_io)", "def four_d6_drop_lowest() -> list:\n rolls: List[int] = []\n for x in range(1, 7):\n new_val: int = 0\n i: int = 0\n while i < 7:\n roll: int = multi_die(3, 6)\n if roll >= new_val:\n new_val = roll\n i += 1\n rolls.append(new_val)\n return rolls", "def is_die_held_in_wrong_stage(self, die):\n if type(die) != type(die_class.Die()):\n raise TypeError(\"Expecting Die argument.\")\n if self.game_stage == 1:\n return die.current_value not in die.possible_values[0:2]\n if self.game_stage == 2:\n return die.current_value not in die.possible_values[2:4]\n if self.game_stage == 3:\n return die.current_value not in die.possible_values[4:6]", "def handle_angry_dice(self):\n if self.die_a.current_value == \"ANGRY\" and self.die_b.current_value == \"ANGRY\":\n print(\"WOW, you're ANGRY!\\nTime to go back to Stage 1!\")\n self.game_stage = 1", "def is_game_over(self):\n if self.just_cheated_a or self.just_cheated_b:\n return False\n if self.game_stage == 3:\n return (self.die_a.current_value == \"5\" and self.die_b.current_value == \"6\" or\n self.die_a.current_value == \"6\" and self.die_b.current_value == \"5\")\n else:\n return False", "def test_is_advancing_to_next_stage_yes(self):\n\n # test_input_cases =\n # [(die_a_value, die_b_value, stage, ok_output),]\n test_input_cases = [\n (\"1\", \"2\", 1, True),\n (\"2\", \"1\", 1, True),\n (\"ANGRY\", \"4\", 2, True),\n (\"4\", \"ANGRY\", 2, True),\n ]\n\n for test_io in test_input_cases:\n self._test_is_game_over(*test_io)", "def select_dice(score, opponent_score):\r\n if (score+opponent_score)%7 == 0:\r\n return four_sided\r\n return six_sided", "def test_roll_once(self):\n\n self.assertIn(self.new_die.roll(), self.possible_values, \"Rolled value was not in possible die values\")", "def fives(dice):\n return sum([x for x in dice if x == 5])", "def safe_to_dance(self):\n # check for all fail/early-termination conditions\n for _ in range(4):\n if self.read_distance() < 300:\n print(\"NOT SAFE TO DANCE!\")\n return False\n else: \n self.turn_by_deg(90) \n\n #after all checks have been done. 
We deduce it's safe\n print(\"SAFE TO DANCE!\")\n return True\n\n for x in range(3): \n self.shake()", "def play_for_dealer(self):\n while self.dealer.sum_cards() < 17:\n self.dealer.hit(self.deck)\n else:\n self.round_winner = True\n self.print_hands()\n self.determine_winner()", "def main(self):\n\n text = \"Welcome to Angry Dice! Roll the two dice until you get thru the 3 Stages!\\n\" \\\n \"Stage 1 you need to roll 1 & 2\\n\" \\\n \"Stage 2 you need to roll ANGRY & 4\\n\" \\\n \"Stage 3 you need to roll 5 & 6\\n\" \\\n \"You can lock a die needed for your current stage \\n\" \\\n \"and just roll the other one, but beware!\\n\" \\\n \"If you ever get 2 ANGRY's at once, you have to restart to Stage 1!\\n\" \\\n \"Also, you can never lock a 6! That's cheating!\\n\\n\" \\\n \"To roll the dice, simply input the name of the die you want to roll.\\n\" \\\n \"Their names are a and b.\\n\"\n print(text)\n input(\"Press ENTER to start!\")\n\n # Check to see if we advance in stage\n self.check_stage()\n\n #Show inital state of the game\n self.print_dice()\n\n while self.current_stage != 4:\n # Prompt user what to reroll\n dice = self.determine_roll()\n\n # Check for cheating\n self.check_cheating(dice)\n\n # Roll the dice\n self.roll_the_dice(dice)\n\n # Check for ANGRY\n self.check_angry()\n\n # Check to see if we advance in stage\n self.check_stage()\n\n # Print the dice\n self.print_dice()\n\n\n # Congratulate them on winning\n print(\"You've won! Calm down!\")", "def select_dice(score, opponent_score, dice_swapped):\n # BEGIN PROBLEM 4\n dice = six_sided\n if dice_swapped == True:\n dice = four_sided\n # END PROBLEM 3\n if (score + opponent_score) % 7 == 0:\n dice = reroll(dice)\n return dice", "def night_round(g):\n wolf_pick = random_pick(get_villager_ids(g))\n\n # if guard is alive\n if is_guard_alive(g):\n # he picks a dude\n g_pick = guard_pick(g)\n else:\n g_pick = 'guard is dead'\n # if chupa is alive\n\n if is_chupa_alive(g):\n \"\"\" he picks a dude\n # picks a random player (not himself)\n if that player is a werewolf,\n then that player dies.\n\n \"\"\"\n c_pick = random_pick(\n remove_id(all_ids(g), game_state['chupa_id']))\n is_c_pick_wolf = c_pick in get_wolf_ids(g)\n else:\n c_pick = None\n is_c_pick_wolf = False\n\n # guard save logic\n if wolf_pick == c_pick:\n # they picked same player\n if g_pick == wolf_pick or g_pick == c_pick:\n pass # guard blocks both\n else:\n # wolf_pick dies\n g = kill_player(g, wolf_pick)\n if is_c_pick_wolf:\n g = kill_player(g, c_pick) # chupa eats wolf\n elif g_pick != wolf_pick and g_pick != c_pick:\n g = kill_player(g, wolf_pick) # kill both picks\n if is_c_pick_wolf:\n g = kill_player(g, c_pick)\n elif g_pick == wolf_pick and is_c_pick_wolf: # only kill chupa pick\n g = kill_player(g, c_pick)\n elif g_pick == c_pick: # only kill wolf_pick\n g = kill_player(g, wolf_pick)\n\n\n # if seer is alive\n if is_seer_alive(g):\n # he picks a dude to investigate\n # make sure enough players left in game\n if len(all_ids(g)) > 1: # more than 1 person other than seer\n s_pick = seer_pick(g)\n resolve_seer_pick(g, s_pick)\n else:\n s_pick = 'seer is dead'\n\n return g", "def strategy(hand, num_die_sides):\n best_move = (0.0, ())\n all_holds = gen_all_holds(hand)\n for hold in all_holds:\n # hand can be less than 5\n num_free_dice = len(hand) - len(hold)\n expected = expected_value(hold, num_die_sides, num_free_dice)\n if expected > best_move[0]:\n best_move = (expected, hold)\n return best_move", "def play(self):\n\n input(\"\"\"\nWelcome to Angry Dice! 
Roll the two dice until you get thru the 3 Stages!\nStage 1 you need to roll 1 & 2\nStage 2 you need to roll ANGRY & 4\nStage 3 you need to roll 5 & 6\nYou can lock a die needed for your current stage\nand just roll the other one, but beware!\nIf you ever get 2 ANGRY's at once, you have to restart to Stage 1!\nAlso, you can never lock a 6! That's cheating!\n\nTo rol the dice, simply input the name of the die you want to roll.\nTheir names are a and b.\n\nPress ENTER to start!\n \"\"\")\n self.cheating = self.roll_parse(\"ab\")\n done = False\n while not done:\n self.print_hand()\n decision = input(\"Roll dice: \")\n self.cheating = self.roll_parse(decision)\n done = self.advance_check()\n self.print_hand()\n print(\"You've won! Calm down!\")", "def wyldingHand(self, level):\n if level == 0:\n die_result = random.randint(1,6)\n elif level == 1:\n die_result = random.randint(1,10)\n elif level == 2:\n die_result = random.randint(1,6) + random.randint(1,6)\n elif level == 3:\n die_result = random.randint(1,8) + random.randint(1,8)\n\n return die_result", "def is_advancing_to_next_stage(self):\n if self.game_stage == 1:\n return (self.die_a.current_value == \"1\" and self.die_b.current_value == \"2\" or\n self.die_a.current_value == \"2\" and self.die_b.current_value == \"1\")\n if self.game_stage == 2:\n return (self.die_a.current_value == \"ANGRY\" and self.die_b.current_value == \"4\" or\n self.die_a.current_value == \"4\" and self.die_b.current_value == \"ANGRY\")\n if self.game_stage == 3:\n return False", "def choosePiece(pieceList):\n dice = [1, 2, 3, 4, 5, 6]\n if len(pieceList) > 1:\n diceRoll = random.choice(dice)\n print(\"Dice Roll:\", diceRoll)\n if not any(piece for piece in pieceList if piece.value == diceRoll):\n # Piece is dead, finds next highest/lowest\n nextUp = -1\n nextDown = -1\n for i in range(diceRoll + 1,6):\n if any(piece for piece in pieceList if piece.value == i):\n nextUp = i\n break\n for i in range(diceRoll - 1, -1, -1):\n if any(piece for piece in pieceList if piece.value == i):\n nextDown = i\n break\n if nextUp == -1:\n print(\"Piece\", diceRoll, \"is dead.\")\n diceRoll = nextDown\n elif nextDown == -1:\n print(\"Piece\", diceRoll, \"is dead.\")\n diceRoll = nextUp\n else:\n print(\"Piece \", diceRoll, \" is dead. Choose \", nextDown, \" or \", nextUp, \".\", sep = '')\n diceRoll = input(\"> \")\n # Obtains user input\n while(diceRoll != str(nextUp) and diceRoll != str(nextDown)):\n diceRoll = input(\"Invalid choice. 
Please try again.\\n> \")\n diceRoll = int(diceRoll, base = 10)\n else:\n diceRoll = pieceList[0].value\n print(\"Only 1 piece left.\")\n\n return [piece for piece in pieceList if piece.value == diceRoll][0]", "def env_step(self, action):\n if action == 0: # Hit\n\n new_state = deepcopy(self.current_state)\n reward = 0\n terminal = False\n \n new_card = min(self.random.randint(1,14), 10)\n # print('new card:', new_card)\n \n if new_card == 1:\n self.player_ace_count += 1\n new_state['player_sum'] = self.current_state['player_sum'] + 11 \n else:\n new_state['player_sum'] = self.current_state['player_sum'] + new_card\n\n while new_state['player_sum'] > 21 and self.player_ace_count > 0:\n self.player_ace_count -= 1\n new_state['player_sum'] -= 10\n\n new_state['usable_ace'] = int(self.player_ace_count > 0)\n\n if new_state['player_sum'] > 21: # Goes bust\n reward = -1\n terminal = True\n\n elif action == 1: # Stick\n\n new_state = deepcopy(self.current_state)\n terminal = True\n\n if self.current_state['dealer_card'] == 1:\n dealer_ace = 1\n dealer_sum = 11\n else:\n dealer_ace = 0\n dealer_sum = self.current_state['dealer_card']\n\n first_two_cards = True\n while dealer_sum < self.dealer_sticks or first_two_cards:\n first_two_cards = False\n # new_card = self.random.choice(range(1,11), p=self.card_probs)\n new_card = min(self.random.randint(1,14), 10)\n if new_card == 1:\n dealer_sum += 11\n dealer_ace += 1\n else:\n dealer_sum += new_card\n\n while dealer_sum > 21 and dealer_ace > 0:\n dealer_sum -= 10\n dealer_ace -= 1\n dealer_ace = int(dealer_ace > 0)\n # print('dealer:', new_card)\n\n # print('dealer sum:', dealer_sum)\n if dealer_sum > 21:\n reward = 1\n else:\n if new_state['player_sum'] > dealer_sum:\n reward = 1\n elif new_state['player_sum'] < dealer_sum:\n reward = -1\n else:\n reward = 0\n # reward = int(new_state['player_sum'] > dealer_sum) - int(new_state['player_sum'] < dealer_sum)\n\n else:\n raise Exception(\"Invalid action.\")\n\n self.current_state = new_state\n\n self.reward_obs_term = (reward, self.observation(self.current_state), terminal)\n\n return self.reward_obs_term", "def die():\n return random.randint(1,6)", "def threes(dice):\n return sum([x for x in dice if x == 3])", "def test_sevenreds(self):\n seq = [1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1]\n _SR = SevenReds(table=self.table, stake=100, rounds_to_go=100)\n\n for i in range(76):\n\n seed = seq.pop(0)\n self.nonrandom.set_seed(seed)\n self.game.cycle(_SR)\n seq.append(seed)\n if i < 7:\n # there should be non betting until round 8\n self.assertEqual(_SR.stake, 100)\n # 5 wins, 5 loses\n self.assertEqual(_SR.stake, 104)", "def tile_picked(self):\n assert len(self.hand) == 5\n self.turn_count += 1", "def make_turn(self):\n # if play first, start in the middle\n if np.count_nonzero(self.board) == 0:\n self.place_disc(self.board.shape[1] / 2)\n return 1\n\n\n # win if possible\n for try_column in range(0,self.board.shape[1]):\n if 0 == self.board[0, try_column]:\n new_board = self.simulate_place_disc(self.board, try_column, self.id())\n if dhw.did_he_win(new_board, self.id(), try_column):\n self.place_disc(try_column)\n return 1\n\n # don't loose if in danger\n for try_column in range(0, self.board.shape[1]):\n if 0 == self.board[0,try_column]:\n new_board = self.simulate_place_disc(self.board, try_column, 3 - self.id())\n if dhw.did_he_win(new_board, 3 - self.id(), try_column):\n self.place_disc(try_column)\n return 1\n\n # don't fall in trap!\n forbidden_columns = []\n for try_column in range(0, 
self.board.shape[1]):\n if 0 == self.board[0,try_column]:\n new_board = self.simulate_place_disc(self.board, try_column, self.id()) # my move\n new_board = self.simulate_place_disc(new_board, try_column, 3 - self.id()) # enemy move\n if dhw.did_he_win(new_board, 3 - self.id(), try_column):\n if try_column not in forbidden_columns: forbidden_columns.append(try_column)\n\n # don't ruin my trap\n for try_column in range(0, self.board.shape[1]):\n if 0 == self.board[0,try_column]:\n new_board = self.simulate_place_disc(self.board, try_column, 3 - self.id()) # 'my' move\n new_board = self.simulate_place_disc(new_board, try_column, self.id()) # my move\n if dhw.did_he_win(new_board, self.id(), try_column):\n if try_column not in forbidden_columns: forbidden_columns.append(try_column)\n\n # allow forbidden_columns if no other choice\n if np.count_nonzero(self.board[0, :]) == self.board.shape[1] - len(forbidden_columns):\n forbidden_columns = []\n\n # otherwise, play randomly\n rannum = random.randrange(7)\n while 0 != self.board[0, rannum] or rannum in forbidden_columns:\n rannum = random.randrange(7)\n self.place_disc(rannum)\n return 1" ]
[ "0.7061071", "0.6833772", "0.6340773", "0.63359606", "0.6290116", "0.628443", "0.6203253", "0.6173044", "0.6135499", "0.61070085", "0.61038446", "0.60700893", "0.6055959", "0.60029256", "0.59974277", "0.5941838", "0.5928955", "0.59277576", "0.590705", "0.5904662", "0.5890709", "0.58808976", "0.5874216", "0.5866259", "0.58655244", "0.58587474", "0.5844912", "0.5842903", "0.58267194", "0.58148116" ]
0.7265603
0
This function locates all nearby cities within num_hops of the given city. It maintains a set of all the cities visited from the starting city at each hop. After completion, it removes the original city from the set of results.
def find_nearby_cities(graph: TeleportGraph, city: str, num_hops: int = 1) -> set:
    if num_hops == 0:
        return set()

    start_city_node = graph.find_city_node(city)
    city_nodes = {start_city_node}

    for i in range(num_hops):
        related_cities = set()
        # for every city in the current set, find all its related cities and add them to the running set of cities
        for city in city_nodes:
            related_cities |= city.related_cities
        city_nodes |= related_cities

    # The starting city cannot be near itself. It will always be added to the set because we have
    # bi-directional (undirected) edges between cities.
    city_nodes.remove(start_city_node)

    return {city.name for city in city_nodes}
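Illustrative only: a minimal sketch of graph types the function above could run against. CityNode and TeleportGraph are hypothetical stand-ins; only find_city_node and the related_cities attribute are assumed from the code itself.

class CityNode:
    def __init__(self, name):
        self.name = name
        self.related_cities = set()   # undirected: edges are mirrored on both nodes

class TeleportGraph:
    def __init__(self):
        self.nodes = {}

    def add_edge(self, a, b):
        node_a = self.nodes.setdefault(a, CityNode(a))
        node_b = self.nodes.setdefault(b, CityNode(b))
        node_a.related_cities.add(node_b)
        node_b.related_cities.add(node_a)

    def find_city_node(self, name):
        return self.nodes[name]

g = TeleportGraph()
g.add_edge("Washington", "Baltimore")
g.add_edge("Baltimore", "Philadelphia")
# One hop from Washington reaches only its direct neighbour; two hops add Philadelphia:
# find_nearby_cities(g, "Washington", 1) -> {"Baltimore"}
# find_nearby_cities(g, "Washington", 2) -> {"Baltimore", "Philadelphia"}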
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def FindDHopCities(self, X, d):\n # G = nx.Graph()\n # G.add_nodes_from(self.nodes)\n # G.add_edges_from(self.edges)\n\n # airports_id_in_city = self.airports.loc[self.airports['city'] == X, 'airport_id'].to_list()\n\n # cities_h_hop = set()\n # for airport in airports_id_in_city:\n # airports_h_hop = nx.descendants_at_distance(G, airport, h)\n # for airport_h_hop in airports_h_hop:\n # cities_h_hop.add(self.GetCityFromAirportId(airport_h_hop))\n\n # return cities_h_hop\n\n graph_adj = self.graph\n\n airports_id_in_city = self.airports.loc[self.airports['city'] == X, 'airport_id'].to_list()\n cities_d_hop = set()\n for airport in airports_id_in_city:\n airports_d_hop = set()\n current_distance = 0\n queue = {airport}\n visited = {airport}\n \n # BFS\n while queue:\n if current_distance == d:\n airports_d_hop.update(queue)\n\n current_distance += 1\n\n current_path = set()\n for poped_node in queue:\n for child in graph_adj[poped_node]:\n if child not in visited:\n visited.add(child)\n current_path.add(child)\n\n queue = current_path\n \n for airport_d_hop in airports_d_hop:\n cities_d_hop.add(self.GetCityFromAirportId(airport_d_hop))\n\n return cities_d_hop", "def visit_city(city):\n visited_cities.append(city)\n unvisited_cities.remove(city)", "def greedy_path():\n itinerary = []\n cities = all_cities(data_set)\n starting_city = randomize_city_start(cities.keys()) # start from a random city\n # print \"starting_city: %s\" % starting_city\n cities_visited = {}\n \n # iterate through all cities\n count = 1\n while True:\n possible_routes = []\n #distance = []\n # print \"starting_city: %s\" % starting_city\n for path in data_set:\n # we only start with city that we have assigned in starting_city\n if starting_city in path['city_start']:\n # we don't go to cities we have visited\n if path['city_end'] in cities_visited:\n continue\n else:\n # print \"path: \", path\n possible_routes.append(path) # add the city if not in the list\n \n if not possible_routes:\n break\n # append this to itinerary\n route = get_shortest_route(possible_routes)\n count += 1\n itinerary.append(route)\n # add this city to visited_cities list\n cities_visited[route[0]] = count\n starting_city = route[1]\n \n return itinerary", "def brute_force(city_list):\n start = time.time()*1000\n shortest = exhaustive_search(city_list,6)\n stop = time.time()*1000\n print(\"Shortest tour for 6 first cities:\", tour_distance(shortest))\n print (\"Time spent on 6 first cities:\", \"%.2f\" % (stop-start), \"ms\")\n print(\"-\")\n \n start = time.time()*1000\n shortest = exhaustive_search(city_list,7)\n stop = time.time()*1000\n print(\"Shortest tour for 7 first cities:\", tour_distance(shortest))\n print (\"Time spent on 7 first cities:\", \"%.2f\" % (stop-start), \"ms\")\n print(\"-\")\n \n start = time.time()*1000\n shortest = exhaustive_search(city_list,8)\n stop = time.time()*1000\n print(\"Shortest tour for 8 first cities:\", tour_distance(shortest))\n print (\"Time spent on 8 first cities:\", \"%.2f\" % (stop-start), \"ms\")\n print(\"-\")\n \n start = time.time()*1000\n shortest = exhaustive_search(city_list,9)\n stop = time.time()*1000\n print(\"Shortest tour for 9 first cities:\", tour_distance(shortest))\n print (\"Time spent on 9 first cities:\", \"%.2f\" % (stop-start), \"ms\")\n print(\"-\")\n \n start = time.time()*1000\n shortest = exhaustive_search(city_list,10)\n stop = time.time()*1000\n print(\"Shortest tour for 10 first cities:\", tour_distance(shortest))\n print (\"Time spent on 10 first cities:\", \"%.2f\" % 
(stop-start), \"ms\")\n print(\" \")", "def get_neigh_demand(city):\n\n # get station set S with more than 10 charge equipment\n static_file_path = exp_data_path + os.sep + 'static' + os.sep + 'static_feature_{}.csv'.format(city)\n static_feature = pd.read_csv(static_file_path, header=0)\n station_set = set(static_feature[static_feature.num >= 10].index)\n\n # calculate 10 nearest neighborhoods for each station, sort by distance and store their index, get a map\n neighbor_distance_map = {}\n matrix_distance = np.load(exp_data_path + os.sep + 'similarity' + os.sep + 'similarity_distance_{}_numpy.npy'.format(city), allow_pickle=True)\n all_distance_map = {i: [] for i in range(station_count[city])}\n for i in range(station_count[city]):\n if i not in station_set:\n continue\n for j in range(station_count[city]):\n if j not in station_set:\n continue\n all_distance_map[i].append((j, matrix_distance[i][j]))\n all_distance_map[i].sort(key=lambda x : x[1], reverse=True)\n neighbor_distance_map[i] = [idx for idx, distance in all_distance_map[i][:10]]\n\n # 11 times header, get static neighborhood feature for each station(in S), get csv: neighbor_feature_{city}.csv\n ALL_HEADER = ['index']\n ALL_HEADER.extend(GENERAL_HEADER)\n for i in range(10):\n for j in GENERAL_HEADER:\n ALL_HEADER.append('{}_{}'.format(j, i))\n\n raw_data = np.empty((len(neighbor_distance_map), len(ALL_HEADER)))\n for i, idx in enumerate(neighbor_distance_map.keys()):\n raw_data[i][0] = idx\n raw_data[i][1:1+len(GENERAL_HEADER)] = static_feature.iloc[idx]['num':'mall']\n for j in range(10):\n neighbor_idx = neighbor_distance_map[idx][j]\n raw_data[i][1+len(GENERAL_HEADER)*(j+1):1+len(GENERAL_HEADER)*(j+2)] = static_feature.iloc[neighbor_idx]['num':'mall']\n neighbor_feature_data = pd.DataFrame(raw_data, columns=ALL_HEADER)\n print('neighbor feature')\n print(neighbor_feature_data)\n\n neighbor_feature_path = exp_data_path + os.sep + 'static' + os.sep + 'static_neighor_feature_{}.csv'.format(city)\n if os.path.exists(neighbor_feature_path):\n os.remove(neighbor_feature_path)\n neighbor_feature_data.to_csv(neighbor_feature_path)\n\n # create final csv(11 times header with basic info(time_index + time_embed_index))\n # if index in S, fill basic info, neighbor_feature and demand\n\n demand = np.load(exp_data_path + os.sep + 'station' + os.sep + 'demand_{}.npy'.format(city), allow_pickle=True)\n time_count = demand.shape[1]\n\n DEMAND_HEADER = []\n DEMAND_HEADER.extend(ALL_HEADER)\n DEMAND_HEADER.extend(['time_index', 'time_embed', 'demand'])\n neighbor_demand_raw_data = np.empty(((len(neighbor_distance_map)*time_count, len(DEMAND_HEADER))))\n\n # get time map like {\"0800\": 1, \"0830\": 2, ....}\n time_index_map = np.load(exp_data_path + os.sep + 'station_list' + os.sep + 'time_index.npy', allow_pickle=True)\n time_index_map = dict(time_index_map.tolist())\n time_map = {t: i for i, t in enumerate(sorted(set([k[-4:] for k in time_index_map['rev_index'].keys()])))}\n\n cur_idx = 0\n for time_idx in range(time_count):\n time_embed_idx = time_map[time_index_map['index'][time_idx][-4:]]\n for station_idx in station_set:\n neighbor_demand_raw_data[cur_idx][0:len(ALL_HEADER)] = neighbor_feature_data.loc[neighbor_feature_data['index']==station_idx, 'index':'mall_9']\n neighbor_demand_raw_data[cur_idx][len(ALL_HEADER)] = time_idx\n neighbor_demand_raw_data[cur_idx][len(ALL_HEADER)+1] = time_embed_idx\n neighbor_demand_raw_data[cur_idx][len(ALL_HEADER)+2] = demand[station_idx][time_idx][-1]\n # todo add slow demand and quick demand here\n 
cur_idx = cur_idx + 1\n print(cur_idx, neighbor_demand_raw_data.shape)\n\n neighbor_demand_data = pd.DataFrame(neighbor_demand_raw_data, columns=DEMAND_HEADER)\n print('neighbor demand')\n print(neighbor_demand_data)\n\n neighbor_demand_path = exp_data_path + os.sep + 'static' + os.sep + 'neighbor_demand_{}.csv'.format(city)\n if os.path.exists(neighbor_demand_path):\n os.remove(neighbor_demand_path)\n neighbor_demand_data.to_csv(neighbor_demand_path)", "def test_find_cities(self):\n\n # Given\n game_state: CarcassonneGameState = CarcassonneGameState()\n\n city_one_side_straight_road = base_tiles[\"city_top_straight_road\"].turn(3)\n city_with_road = inns_and_cathedrals_tiles[\"ic_15\"].turn(3)\n\n game_state.board = [[None for column in range(2)] for row in range(1)]\n\n game_state.board[0][0] = city_with_road\n game_state.board[0][1] = city_one_side_straight_road\n\n # When\n cities: [City] = CityUtil.find_cities(\n game_state=game_state,\n coordinate=Coordinate(0, 0)\n )\n\n # Then\n self.assertEqual(1, len(cities))\n self.assertEqual(2, len(cities[0].city_positions))\n self.assertTrue(cities[0].finished)", "def hill_climb(data, selected_cities):\n #Picking a random solution as the current best.\n shortest_tour = random.sample(selected_cities, len(selected_cities))\n shortest_distance = get_distance(data, shortest_tour)\n\n if(len(shortest_tour) > 1):\n\n fair_evaluations = 1000 #Number of evaluations\n worse_neighbors = 0\n\n #Compare to neighbor solution(s) and repeat until no better solution is found.\n while worse_neighbors <= fair_evaluations:\n copy = shortest_tour.copy()\n new_tour = swap_random_cities(copy) #Choose neighbor\n new_distance = get_distance(data, new_tour)\n\n if new_distance < shortest_distance:\n shortest_tour = new_tour\n shortest_distance = new_distance\n worse_neighbors = 0\n\n else: worse_neighbors += 1\n\n return shortest_tour, shortest_distance", "def get_top_station_set(city):\n s = {}\n for file in os.listdir(exp_data_path + os.sep + 'station' + os.sep + city):\n with open(exp_data_path + os.sep + 'station' + os.sep + city + os.sep + file) as f:\n reader = csv.reader(f)\n for row in reader:\n if row[0] not in s:\n s[row[0]] = 1\n else:\n s[row[0]] = s[row[0]] + 1\n\n sort_s = dict(sorted(s.items(), key=lambda x : x[1], reverse=True))\n first = True\n res = []\n for k, v in sort_s.items():\n if first:\n top = v\n first = False\n if top - v <= 30:\n res.append(k)\n print('before', len(sort_s))\n print('after', len(res))\n\n # restore new map [old_index, new_index]\n list_remap = {}\n new_index = 0\n for index in range(0, data_length[city]):\n if str(index) in res:\n list_remap[index] = new_index\n new_index = new_index + 1\n\n # print(list_remap)\n check_path(exp_data_path + os.sep + 'station_list')\n file_name = exp_data_path + os.sep + 'station_list' + os.sep + 'list_remap_{}'.format(city) + '.npy'\n if os.path.exists(file_name):\n os.remove(file_name)\n np.save(file_name, list_remap)", "def alltours(cities):\n start = first(cities)\n return [[start] + Tour(rest)\n for rest in itertools.permutations(cities - {start})]", "def nearest_neighbor(FILE_NAME):\n time_0 = DEFAULT_TIMER()\n list_of_cities = read_in_file(FILE_NAME)\n best_opt_distance = sys.maxsize\n best_sol_of_cities = []\n\n for city in list_of_cities: #iterate through all cities in the list\n for resetCity in list_of_cities: #reset them all to not visited\n resetCity[3] = False\n sol_of_cities = [] #set our current solution to empty\n opt_distance = 0 #reset our current optimal solution\n 
nearest_dist = sys.maxsize #nearest distance is huge\n nearest_city = 0\n first_city = city #the first city visited is the next city in our iteration\n first_city[3] = True\n sol_of_cities.append(first_city[0])\n\n city_1 = first_city\n\n while len(sol_of_cities) < len(list_of_cities): #check the current city to\n #find the closest city in the\n #list of cities not visited\n nearest_dist = sys.maxsize\n for city_2 in list_of_cities:\n #if the two cities are not the same and have not visited the second city\n if city_1[0] != city_2[0] and city_2[3] != True:\n dx = city_1[1] - city_2[1]\n dy = city_1[2] - city_2[2]\n dist_to = int(round(math.sqrt((dx*dx) + (dy*dy))))\n #update nearest_city if the new distance is less than the current nearest distance\n if dist_to < nearest_dist:\n nearest_dist = dist_to\n nearest_city = city_2\n#add the city only if we haven't already visited it. Had a wierd bug need to investigate this further\n if nearest_city[3] != True: \n sol_of_cities.append(nearest_city[0])\n opt_distance += nearest_dist\n nearest_city[3] = True\n city_1 = nearest_city\n #quit if our distance is larger than our best solution found\n if(opt_distance > best_opt_distance):\n #print(\"quit this search\") #comment out, see skipped searches\n break\n#add the distance from the last city we visited back to the first city to make a tour\n dx = nearest_city[1] - first_city[1]\n dy = nearest_city[2] - first_city[2]\n opt_distance += int(round(math.sqrt((dx*dx) + (dy*dy))))\n#if our current solution is better than the previous solution replace it, then start over until we've checked all cities\n if best_opt_distance > opt_distance:\n best_opt_distance = opt_distance\n best_sol_of_cities = sol_of_cities\n #print(best_sol_of_cities) #can remove later for testing now\n #print(best_opt_distance) #same as above\n \n write_to_file(best_sol_of_cities, best_opt_distance, FILE_NAME)\n time_1 = DEFAULT_TIMER()\n print(\"Time taken: \", (time_1 - time_0))", "def exhaustive_search(city_list,until):\n all_tours = recursive_travel(city_list[0:until])\n shortest = shortest_tour(all_tours)\n return shortest", "def search(self, passions, number_of_results):\n\n # Sanitize spaces with -\n passions = [p.lower().replace(' ', '_') for p in passions]\n cities_with_scores = {}\n\n for d in self.city_passion_matrix:\n # Initialize score of cities for the passions\n cities_with_scores[d] = { 'score': 0.0, 'total': self.total_endorsement_per_city[d]};\n # Score of d for given passions is the multiplication of individual values\n for p in passions:\n # If passion does not exist then endorsement_count is 1 to avoid zeroth bias,\n # otherwise stored value\n if p in self.city_passion_matrix[d]:\n endorsement_count = self.city_passion_matrix[d][p]\n else:\n endorsement_count = 1\n cities_with_scores[d]['score'] += math.log((endorsement_count / (self.total_endorsement_per_city[d] + 1.0)) )\n\n # Sort the cities in descending order of relavency score; use total endorsement count to break tie\n sorted_cities_with_scores = sorted(cities_with_scores.items(), key= lambda x: (x[1]['score'], x[1]['total']), reverse=True)\n\n return sorted_cities_with_scores[:number_of_results]", "def get_host_info(search_keyword, starbucks_data, city_info):\n host_data = []\n\n payload = {\n \"query_type\": \"RQBXY\",\n \"pagesize\": \"20\",\n \"pagenum\": '',\n \"qii\": \"true\",\n \"cluster_state\": \"5\",\n \"need_utd\": \"true\",\n \"utd_sceneid\": \"1000\",\n \"div\": \"PC1000\",\n \"addr_poi_merge\": \"true\",\n \"is_classify\": \"true\",\n 
\"zoom\": \"14\",\n \"longitude\": starbucks_data['longitude'],\n \"latitude\": starbucks_data['latitude'],\n \"range\": \"1000\",\n \"city\": city_info[1][0],\n \"keywords\": search_keyword,\n }\n\n for page_num in range(1, 3):\n payload['pagenum'] = page_num\n poi_list = request_amap_poi_info(payload, 'https://www.amap.com/place/' + starbucks_data['amap_key'])\n\n if not poi_list:\n print('request host list fail with %s' % page_num)\n continue\n\n for poi in poi_list:\n if not (poi.get('longitude', '') or poi.get('latitude', '') or starbucks_data['longitude'] or starbucks_data['latitude']):\n distance = None\n else:\n distance = geo_distance(poi.get('longitude', ''), poi.get('latitude', ''),starbucks_data['longitude'], starbucks_data['latitude'])\n\n data = {\n 'starbucks_key': starbucks_data['amap_key'],\n 'keyword': search_keyword,\n 'city': poi.get('cityname'),\n 'name': poi.get('name'),\n 'longitude': poi.get('longitude'),\n 'latitude': poi.get('latitude'),\n 'address': poi.get('address'),\n 'tel': poi.get('tel'),\n 'mean_price': '',\n 'distance': distance\n }\n domain_list = poi.get('domain_list')\n for domain in domain_list:\n if domain.get('name', '') == 'price':\n price_raw = domain.get('value', '')\n # price_raw = \"<font color='#90969a'>人均:</font><font color='#f84b57'>¥</font><font color='#f84b57'>114</font>\"\n try:\n data['mean_price'] = re.findall('<.*>人均:<.*>¥<.*>([0-9]+)</font>', price_raw)[0]\n except:\n data['mean_price'] = None\n break\n host_data.append(data)\n\n print('【%s】的【%s】的周边的【%s】菜系,第【%d】页爬取完毕' % (city_info[1], starbucks_data['name'], search_keyword, page_num))\n return host_data", "def getCitySightings(catalog,city):\n cities=catalog['cities']\n keyset=om.keySet(cities)\n selected_city=om.get(cities,city)['value']\n match=lt.newList(datastructure='ARRAY_LIST')\n max_sightings=0\n max_city=\"\"\n #Para obtener la ciudad con mas cantidad de avistamientos toca recorrer todo el arbol.\n #De esto se encarga el siguiente for-\n for c in lt.iterator(keyset):\n city_sightings=lt.size(om.get(cities, c)['value'])\n if city_sightings>max_sightings:\n max_sightings=city_sightings\n max_city=c\n \n #Añade todo los avistamientos de la ciudad ingresada a una lista.\n for sight in lt.iterator(selected_city):\n lt.addLast(match,sight)\n total=lt.size(match)\n ms.sort(match,compareDateTime)\n \n #Hacer la lista en caso de que halla mas que 6\n if total>6:\n joined=lt.subList(match,1,3)\n last=lt.subList(match,total-3,3)\n for a in lt.iterator(last):\n lt.addLast(joined, a)\n else:\n joined=lt.subList(match,1,total)\n return total, joined, max_sightings, max_city", "def place_cities(self, n=20):\n self.city_score = self.flow ** 0.5\n self.city_score[self.elevation[:-1] <= 0] = -9999999\n self.cities = []\n while len(self.cities) < n:\n # location of potential new city is place with maximum score\n newcity = np.argmax(self.city_score)\n\n # Only place cities between 0.1 and 0.9 axes.\n city_max_ax = 0.85\n city_min_ax = 0.15\n # Chance that this location has no city, scales with number of cities placed so far\n if (\n np.random.random() < (len(self.cities) + 1) ** -0.2\n and city_min_ax < self.vxs[newcity, 0] < city_max_ax\n and city_min_ax < self.vxs[newcity, 1] < city_max_ax\n ):\n self.cities.append(newcity)\n\n # penalize city score for the newcity location.\n self.city_score -= 0.01 * 1 / (distance(self.vxs, self.vxs[newcity, :]) + 1e-9)", "def get_cities():\n _, cities = API.cities(limit=1000)\n result = []\n for city in cities['results']:\n result.append(city['city'])\n 
return result", "def get_top_station_csv(city):\n list_index = np.load(\"/home/ryj/renyajie/exp/NETL/data/spider_data/station_list/final_list_index.npy\",\n allow_pickle=True)\n list_remap = np.load(\"/home/ryj/renyajie/exp/NETL/data/exp_data/station_list/list_remap_{}.npy\".format(city),\n allow_pickle=True)\n list_index = dict(list_index.tolist())\n list_remap = dict(list_remap.tolist())\n\n # get longitude and latitude\n geo_map = {}\n with open(\"/home/ryj/renyajie/exp/NETL/data/spider_data/station_list/final_list_{}.csv\".format(city)) as f:\n reader = csv.reader(f)\n for line in reader:\n geo_map[int(line[0])] = (float(line[1]), float(line[2]))\n\n with open(\"/home/ryj/renyajie/exp/NETL/data/exp_data/station_list/list_{}.csv\".format(city), \"w\") as f:\n writer = csv.writer(f)\n for old_index, new_index in list_remap.items():\n if old_index in list_index[city]:\n writer.writerow([new_index, geo_map[old_index][0], geo_map[old_index][1], list_index[city][old_index]])\n pass", "def get_all_roads_starting_from(network, city):\n return network[1][city][0]", "def find(categories, keywords, cities):\n results = []\n for city in cities:\n for cat in categories:\n city_items = find_by_city(city, cat, keywords)\n results.append(city_items)\n return results", "def nn_tsp(cities, start=None):\n if start is None: start = first(cities)\n tour = [start]\n unvisited = set(cities - {start})\n while unvisited:\n C = nearest_neighbor(tour[-1], unvisited)\n tour.append(C)\n unvisited.remove(C)\n return tour", "def greedy_selector(self):\n r_k = 0 \n best_route = []\n cities_to_visit = [i for i in range(1, self.city_count)]\n for _ in range(1, self.city_count):\n s_ind = np.argmax([self.tau[(r_k, u)] for u in cities_to_visit])\n s_k = cities_to_visit.pop(s_ind)\n best_route.append((r_k, s_k))\n r_k = s_k\n best_route.append((r_k, 0))\n \n shortest_path = np.sum([self.phi[(p)] for p in best_route])\n return best_route, shortest_path", "async def excursion_by_city_search(city_name: str, lang='en', limit=10) -> dict:\n city_params = {'lang': lang, 'name': city_name, 'limit': 100}\n cities = await excursion_instance.get_cities(city_params)\n for city in cities:\n if city.get('name') == city_name:\n data = {'city_id': city.get('region_id'),\n 'country_id': city.get('country_id'),\n 'limit': limit}\n return await excursion_instance.get_excursions(data)", "def compute_distance_pairs(cities):\n global distances_pair\n for city_from in cities:\n distances_pair.append([0 for r in range(city_from.index)]) # create\n for city_to in cities[:city_from.index]:\n distances_pair[city_from.index][city_to.index] = city_from.compute_distance_to_city_in_km(city_to)", "def dedup_nearby(self):\n nearby = sorted(self.nearby, key=lambda p: (p['pokemonId'], p['expiration_time']))\n\n pokemon = []\n timestamp = time.time()\n\n for key, group in itertools.groupby(nearby, key=lambda p: p['pokemonId']):\n data = next(group)\n\n monster = Pokemon.objects.get(pokedex_number=data['pokemonId'])\n latitude = data['latitude']\n longitude = data['longitude']\n\n pokemon.append(\n NearbyPokemon(\n monster.name,\n data['expiration_time'],\n latitude,\n longitude,\n )\n )\n\n monster.sightings.append([timestamp, latitude, longitude])\n monster.save()\n\n return pokemon", "def FindCityVisit(self, city_visit_parameters,\n city_visit_accumulator_generator):\n raise NotImplemented()", "def get_all_neighbors(locs):\n neighbors = (get_neighbors(loc) for loc in locs)\n return set.union(*neighbors)", "def collect_city_items(self, samples: int, city: 
str, country:str) -> None:\n time_start = time.time()\n url = self.get_city_url(city,country)\n samples_taken = 0\n while url != None:\n\n page_source = self.get_page_source(url, \"_1g5ss3l\")\n soup = BeautifulSoup(page_source, \"html.parser\") \n for item in soup.find_all(\"div\", class_=\"_fhph4u\"): \n if samples_taken == samples:\n self.__driver.quit()\n print(\n f\"{city} scraping is done!{samples_taken} samples was taken.Time elapsed: {time.time()-time_start} seconds.\"\n )\n return\n else:\n self.__collected_dic[\"city\"].append(city)\n\n item_url = self.get_item_url(item)\n self.get_item_title(item)\n self.get_item_property_type(item)\n self.get_item_location(item)\n self.get_item_rating(item)\n self.get_item_reviews(item)\n self.get_item_price(item)\n self.get_item_guests(item)\n self.get_item_bedrooms(item)\n self.get_item_beds(item)\n self.get_item_baths(item)\n\n self.collect_amenities(item_url)\n\n samples_taken = samples_taken + 1\n url = self.find_next_page(soup)\n print(\n f\"{city} scraping is done!{samples_taken} samples was taken.Time elapsed: {time.time()-time_start} seconds.\"\n )", "def annealing(cities, temperature_begin=1.0e+300, temperature_end=.1, cooling_factor=.99, nb_iterations=1):\n\n cities_best = cities[:]\n distance_best = total_distance_in_km(cities_best)\n\n distances_current = []\n distances_best = []\n ids_iteration = []\n\n try:\n for iteration in range(nb_iterations):\n # the search is restarted at every iteration from\n # the best know solution\n temperature = temperature_begin\n cities_current = cities_best[:]\n distance_current = distance_best\n distance_new = distance_best\n cities_new = cities_best[:]\n\n step = 0\n while temperature > temperature_end:\n # compute the indices of the two cities to swap by random,\n # but never touch the first city (it does not need to change)\n index = random.sample(range(len(cities_new) - 1), 2)\n index[0] += 1\n index[1] += 1\n\n # optimize by recomputing only the changed distances\n swap_before = distance_swap(cities_new, index[0], index[1])\n cities_new[index[0]], cities_new[index[1]] = cities_new[index[1]], cities_new[index[0]]\n swap_after = distance_swap(cities_new, index[0], index[1])\n\n # compute the new distance\n # recomputing all is bad: distance_new = total_distance_in_km(cities_new)\n distance_new = distance_new - swap_before + swap_after\n\n # acceptance probability by Kirkpatrick et al.\n diff = distance_new - distance_current\n if diff < 0 or math.exp( -diff / temperature ) > random.random():\n cities_current = cities_new[:]\n distance_current = distance_new\n else:\n # reset cities and distance\n distance_new = distance_current\n cities_new = cities_current[:]\n\n # update the best if current solution is better\n # not part of the annealing itself, just used for the restart\n if distance_current < distance_best:\n cities_best = cities_current[:]\n distance_best = distance_current\n\n if True:\n # if step % 100 == 0:\n # uncomment to enable systematic sampling: 1 point every 100th\n distances_current.append(distance_current)\n distances_best.append(distance_best)\n temperature = temperature * cooling_factor\n step = step + 1\n\n ids_iteration.append(len(distances_current))\n\n except KeyboardInterrupt:\n print \"Interrupted on user demand.\"\n print 'performed iterations: %d' % iteration \n\n return cities_best, distances_current, distances_best, ids_iteration", "def searchNearbyHospitals(lat, lng, radius = 5000, limit_search_count = 10):\n # Initialising the GooglePlaces constructor \n 
google_places = GooglePlaces(GOOGLE_MAPS_API_KEY) \n \n query_result = google_places.nearby_search( \n lat_lng ={'lat': lat, 'lng': lng}, \n radius = radius, \n types =[types.TYPE_HOSPITAL]) \n \n nearby_hospitals = []\n\n for place in query_result.places[:limit_search_count]: \n hospital = dict()\n place.get_details()\n hospital['hospitalName'] = place.name\n hospital['address'] = place.formatted_address\n hospital['phoneNumber'] = place.local_phone_number \n nearby_hospitals.append(hospital)\n\n if(len(nearby_hospitals) == 0):\n nearby_hospitals = searchNearbyHospitals(lat = lat, lng = lng, radius = radius+5000)\n\n return nearby_hospitals", "def initCitys(self):\n self.cities = []\n for vertex in self.metaGraph:\n self.cities.append(vertex)" ]
[ "0.6298028", "0.60487", "0.58636826", "0.58009905", "0.558688", "0.5529587", "0.5491075", "0.54383576", "0.54128426", "0.5362866", "0.5285198", "0.5187931", "0.5150512", "0.5140834", "0.5108585", "0.5091598", "0.506311", "0.5040769", "0.503374", "0.4991658", "0.49839562", "0.49689263", "0.49476865", "0.49468565", "0.49235433", "0.49093017", "0.48995063", "0.48981956", "0.48862684", "0.48792183" ]
0.73122805
0
This function determines whether two cities can be reached from one another in the graph. The algorithm uses a breadth-first search approach.
def does_route_exist(graph: TeleportGraph, start_city: str, end_city: str) -> bool:
    queue = Queue()

    start_city_node = graph.find_city_node(start_city)
    queue.put(start_city_node)

    # keep track of the nodes we've visited - if we do not do this, we'll wind up in an infinite loop,
    # because we're dealing with an undirected graph
    visited_nodes = {start_city_node}

    while not queue.empty():
        city = queue.get()

        # if we've found the matching city - a route exists. no need to continue processing
        if city.name == end_city:
            return True

        # if not, check the related cities, only visiting the cities we haven't visited already
        for related_city in city.related_cities:
            if related_city not in visited_nodes:
                visited_nodes.add(related_city)
                queue.put(related_city)

    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def are_connected(self, person1, person2):\n\n possible_nodes = Queue()\n seen = set()\n possible_nodes.enqueue(person1)\n seen.add(person1)\n\n while not possible_nodes.is_empty():\n person = possible_nodes.dequeue()\n print(\"checking\", person)\n if person is person2:\n return True\n else:\n for cohabitant in person.adjacent - seen:\n possible_nodes.enqueue(cohabitant)\n seen.add(cohabitant)\n print(\"added to queue:\", cohabitant)\n return False", "def bidirectional_search(self):\n begin = time.time()\n\n initial_node = self.get_node(self.initial_state)\n\n final_node = self.get_node(self.final_state)\n\n queue = [initial_node, final_node]\n\n initial_node.visited_right = True\n \n final_node.visited_left = True\n\n visited_nodes = []\n \n while queue:\n node = queue.pop(0)\n\n if self.is_intersecting(node):\n end = time.time()\n\n method_time = end - begin\n\n copy_node = node\n\n path = []\n\n while node:\n path.append(node)\n\n node = node.parent_right\n\n path.reverse()\n\n del path[-1]\n\n while copy_node:\n path.append(copy_node)\n\n copy_node = copy_node.parent_left\n\n self.add_result('Busca bidirecional', method_time, path, visited_nodes)\n \n return True\n else:\n states = node.neighboring_states()\n\n neighbors = [self.add_node(state) for state in states]\n\n for neighbor in neighbors:\n if node.visited_left and not neighbor.visited_left:\n neighbor.parent_left = node\n\n neighbor.visited_left = True\n\n queue.append(neighbor)\n \n visited_nodes.append(neighbor)\n\n if node.visited_right and not neighbor.visited_right:\n neighbor.parent_right = node\n\n neighbor.visited_right = True\n\n queue.append(neighbor)\n\n visited_nodes.append(neighbor)\n \n end = time.time()\n\n method_time = end - begin\n\n self.add_result('Busca bidirecional', method_time, [], visited_nodes)\n\n return False", "def connected_pair(self, first, second):\n cover = set()\n queue = {first}\n while queue:\n new = queue.pop()\n cover.add(new)\n for adjacent in new.parents() | new.children():\n if adjacent == second:\n return True\n elif not adjacent in cover:\n queue.add(adjacent)\n return False", "def FindDHopCities(self, X, d):\n # G = nx.Graph()\n # G.add_nodes_from(self.nodes)\n # G.add_edges_from(self.edges)\n\n # airports_id_in_city = self.airports.loc[self.airports['city'] == X, 'airport_id'].to_list()\n\n # cities_h_hop = set()\n # for airport in airports_id_in_city:\n # airports_h_hop = nx.descendants_at_distance(G, airport, h)\n # for airport_h_hop in airports_h_hop:\n # cities_h_hop.add(self.GetCityFromAirportId(airport_h_hop))\n\n # return cities_h_hop\n\n graph_adj = self.graph\n\n airports_id_in_city = self.airports.loc[self.airports['city'] == X, 'airport_id'].to_list()\n cities_d_hop = set()\n for airport in airports_id_in_city:\n airports_d_hop = set()\n current_distance = 0\n queue = {airport}\n visited = {airport}\n \n # BFS\n while queue:\n if current_distance == d:\n airports_d_hop.update(queue)\n\n current_distance += 1\n\n current_path = set()\n for poped_node in queue:\n for child in graph_adj[poped_node]:\n if child not in visited:\n visited.add(child)\n current_path.add(child)\n\n queue = current_path\n \n for airport_d_hop in airports_d_hop:\n cities_d_hop.add(self.GetCityFromAirportId(airport_d_hop))\n\n return cities_d_hop", "def does_loop_exist(graph: TeleportGraph, city: str) -> bool:\n start_node = graph.find_city_node(city)\n visited_inner_nodes = set()\n\n def is_loop(path: Path, node: CityNode) -> bool:\n # check the current path + node combination to see if they form 
a loop\n new_path = path + node\n\n if node != start_node and node not in visited_inner_nodes:\n visited_inner_nodes.add(node)\n\n if new_path.is_loop():\n return True\n\n # If we've not found a loop yet, let's check the related nodes of this node and call ourselves recursively\n # get the set of nodes for the candidates in the path. This list comprehension filters out related nodes that\n # point back to the last entry on the path to avoid backtracking along the same nodes back to the origin\n # path_candidates = path.get_directed_path_nodes(node.related_cities)\n path_candidates = [rc for rc in node.related_cities\n if rc not in visited_inner_nodes and rc not in path.data[-1:]]\n for pc in path_candidates:\n # check to see if the path candidate, in combination with the current path object form a loop\n if is_loop(new_path, pc):\n return True\n\n # the current path and all its descendants do not form a loop\n return False\n\n return is_loop(Path(), start_node)", "def main():\n # create graph\n city = create_city()\n\n # create costs cities\n costs = create_costs()\n\n # create min path to city\n path = create_min_path()\n\n # create list not check city\n list_cities = ['biysk', 'barnaul', 'novosibirsk', 'belokurikha',\n 'tomsk', 'krasnoyarsk', 'omsk']\n\n used_city = list_cities.pop(0) # Сity that we processing\n while list_cities:\n used_costs = costs[used_city] # Cost of the current city\n used_path = path[used_city]\n for neighbor in city[used_city]:\n costs_neighbor = costs[neighbor]\n path_to_neighbor = city[used_city][neighbor]\n\n # If path on current node less then rewrite the neighbor node\n if used_costs + path_to_neighbor < costs_neighbor:\n costs[neighbor] = used_costs + path_to_neighbor\n path[neighbor] = used_path + [neighbor]\n\n # Finding the min path to the neighbor\n min_path = min(city[used_city].values())\n # Entry in used_city the city with min path to it\n used_city = find_city(city, used_city, min_path)\n # Deleting city from the uninitiated cities\n list_cities.remove(used_city)\n\n for city, value in costs.items():\n print(f'{city:13} {value:2}', end=\" \")\n print(*path[city], sep=\", \")", "def bfs_is_connected(self):\n q = Queue.Queue()\n origins = [self.vertices()[0]]\n traveled = set(origins)\n while origins:\n for o in origins:\n for child in self.out_vertices(o):\n if child not in traveled:\n q.put(child)\n traveled.add(child)\n\n origins = []\n while not q.empty():\n origins.append(q.get())\n if len(traveled) == self.order():\n return True\n return False", "def flight_paths(city1, city2, data=data):\n cities_to_travel = Graph() # instantiate a new graph\n location_dict = {} # empty dictionary to hold city, location, and distances\n for city in data: # creates dictionary of key cities, values: lat and long\n try:\n location_dict[city['city']] # check if city is already in dictionary\n except KeyError:\n location_dict[city['city']] = city['lat_lon'] # add's city as key and it's lat/long as value\n for city in data: # adds distances between each connected city\n for destination in city['destination_cities']:\n try: # adding edge and weights (distances) between cities\n cities_to_travel.add_edge(city['city'], destination, calculate_distance(city['lat_lon'], location_dict[destination]))\n except KeyError: # edge case; if connection already exists or points to city that doesn't have a lat/long\n pass\n try:\n to_return = cities_to_travel.bellman_ford(city1, city2) # Bellman Ford shortest path through city\n if to_return[0] == float(\"inf\"):\n raise 
KeyError(\"City does not exist\")\n else:\n return to_return\n except KeyError:\n raise KeyError('City has no Lat or Long given, or does not exist')", "def verbose_are_connected_recursive(self, person1, person2, seen=None):\n\n if not seen:\n seen = set()\n\n if person1 is person2:\n print(\"\\nreturning True - {} is {}\".format(person1.name, person2.name))\n return True\n\n seen.add(person1) # Keep track that we've visited here\n print(\"adding\", person1)\n\n for person in person1.adjacent:\n\n if person not in seen:\n\n print(\"calling method on {}'s cohabitant {} with {}\".format(person1.name, person.name, person2.name))\n if self.verbose_are_connected_recursive(person, person2, seen):\n print(\"\\nreturning True from checking {}\".format(person.name))\n return True\n\n print(\"returning False from checking {}\".format(person1.name))\n return False", "def test_same_node_is_reachable(self):\n # G is an arbitrary tournament on ten nodes.\n G = DiGraph(sorted(p) for p in combinations(range(10), 2))\n assert_true(all(is_reachable(G, v, v) for v in G))", "def path_exists(G, node1, node2):\n visited_nodes = set()\n\n # Initialize the queue of cells to visit with the first node: queue\n queue = [node1]\n\n # Iterate over the nodes in the queue\n for node in queue:\n\n # Get neighbors of the node\n neighbors = G.neighbors(node)\n\n # Check to see if the destination node is in the set of neighbors\n if node2 in neighbors:\n print('Path exists between nodes {0} and {1}'.format(node1, node2))\n return True\n break\n\n else:\n visited_nodes.add(node)\n queue.extend([n for n in neighbors if n not in visited_nodes])\n\n # Check to see if the final element of the queue has been reached\n if node == queue[-1]:\n print('Path does not exist between nodes {0} and {1}'.format(\n node1, node2))\n\n # Place the appropriate return statement\n return False", "def cities_occur_in_route(network, cities,route):\n list_cities = get_cities_of_route(network, route)\n for i in cities:\n if i not in list_cities:\n return False\n return True", "def obstacle_between(self, node1, node2, agent):\n if self.obstacles[agent] is None:\n return False\n if self.is_inside(node1, self.obstacles, agent)[0] or self.is_inside(node2, self.obstacles, agent)[0]:\n return True\n\n for cords in self.xy_cords:\n x1 = node1.state[cords[0]]\n y1 = node1.state[cords[1]]\n x2 = node2.state[cords[0]]\n y2 = node2.state[cords[1]]\n p1 = Point(x1, y1)\n q1 = Point(x2, y2)\n for obstacle in self.obstacles[agent]:\n x_min = obstacle[0][0]\n x_max = obstacle[0][1]\n y_min = obstacle[1][0]\n y_max = obstacle[1][1]\n p2 = Point(x_min, y_min)\n q2 = Point(x_min, y_max)\n if doIntersect(p1, q1, p2, q2):\n return True\n p2 = Point(x_min, y_max)\n q2 = Point(x_max, y_max)\n if doIntersect(p1, q1, p2, q2):\n return True\n p2 = Point(x_max, y_max)\n q2 = Point(x_max, y_min)\n if doIntersect(p1, q1, p2, q2):\n return True\n p2 = Point(x_max, y_min)\n q2 = Point(x_min, y_min)\n if doIntersect(p1, q1, p2, q2):\n return True\n return False", "def __canReachDFS(current, visited):\n for neighbor in current.getConnections():\n # this check prevents cycles from infinitely looping\n if neighbor not in visited:\n visited.add(neighbor)\n __canReachDFS(neighbor, visited)", "def is_connected(self, node1, node2):\r\n\r\n return node1 in self.graph and node2 in self.graph[node1]", "def search_loop(graph):\n visited = set()\n for vertex in graph:\n if vertex not in visited:\n if dfs(vertex, graph, visited, -1):\n return True\n return False", "def 
are_connected_recursive(self, person1, person2, seen=None):\n\n if not seen:\n seen = set()\n\n if person1 is person2:\n return True\n\n seen.add(person1) # Keep track that we've visited here\n print(\"adding\", person1)\n\n for person in person1.adjacent:\n\n if person not in seen:\n\n if self.are_connected_recursive(person, person2, seen):\n return True\n\n return False", "def dfs(self):\n\n stack = [self.root]\n\n while stack:\n node = stack[-1]\n\n if node.goal:\n return True\n\n if not node.visited:\n node.visited = True\n\n for adj_node in self.return_adj_nodes(node):\n if adj_node and not adj_node.visited and not adj_node.wall:\n stack.append(adj_node)\n break\n else:\n stack.pop()\n\n return False", "def is_dominated(g,n1,n2): # g: graph; n1,n2: node addresses\n if 'head' in g.nodes[n1]:\n head = g.nodes[n1]['head']\n if head==n2:\n return True\n if is_dominated(g,head,n2):\n return True\n return False", "def connected(self, x, y):\n\n return self.__find_root(x) == self.__find_root(y)", "def inside_itself(self):\n for i in range(2, len(self.nodes)):\n if self.nodes[0] == self.nodes[i]:\n return True\n return False", "def dfs(self, node: int, graph: Dict[int, List[int]], visited: Set[int]) -> bool:\n if visited[node] == 1:\n return False\n if visited[node] == -1:\n return True\n visited[node] = -1\n has_cycle = any(self.dfs(node, graph, visited) for node in graph[node])\n visited[node] = 1\n return has_cycle", "def is_complete(self, A, B):\n return all(self.is_edge(v, w) for v in A for w in B)", "def optimizedRoutePossibilities2(routes,cities):\n\tgraph = createOptimizedGraph(routes)\n\tfor couple in permutationsFromOrigin(cities):\n\t\tif couple is not None:\n\t\t\t#yield find_all_paths2(graph,couple[0],couple[1])[0]\n\t\t\tpath = find_all_paths2(graph,couple[0],couple[1])[0]\n\t\t\tif couple[0] in graph[path[-1]]:\n\t\t\t\tyield path", "def stateAlreadyVisited(kState):\n\n visited = False\n \n for st in visitedStates:\n #check if both states are same\n visited = areStatesSame(st,kState)\n if visited == True:\n return visited\n\n return visited", "def find_path(G, path):\n # create index for background and foreground\n back = len(G)-2\n fore = len(G)-1\n # create dictionary of visited or unvisited based on city name\n visited = {}\n # make all pixels as unvisited\n for i in range(len(G)-2):\n visited[i] = False\n visited[fore] = False\n \n # Create a queue for BFS\n queue=[]\n\n # Mark the source node as visited and enqueue it\n queue.append(back)\n # background is starting node\n visited[back] = True\n\n while queue:\n\n # dequeue a node from queue\n u = queue.pop(0)\n\n # iterate through the pixels inc. 
fore and back\n for ind in range(len(G)):\n # check for weights\n if visited[ind] == False and G[u,ind] > 0:\n queue.append(ind)\n visited[ind] = True\n path[ind] = u\n\n # If we reached sink in BFS starting from source return True\n return True if visited[fore] else False", "def is_connected(self, node1, node2):\r\n\r\n return node1 in self._graph and node2 in self._graph[node1]", "def find_nearby_cities(graph: TeleportGraph, city: str, num_hops: int = 1) -> set:\n\n if num_hops == 0:\n return set()\n\n start_city_node = graph.find_city_node(city)\n\n city_nodes = {start_city_node}\n\n for i in range(num_hops):\n related_cities = set()\n\n # for every city in the current set, find all its related cities and add them to the global list of cities\n for city in city_nodes:\n related_cities |= city.related_cities\n\n city_nodes |= related_cities\n\n # The starting city cannot be near itself. It will always be added to the set because we have\n # bi-directional (undirected) edges between cities.\n city_nodes.remove(start_city_node)\n return {city.name for city in city_nodes}", "def dfs(n):\n if seen[n]: return seen[n] == 1 \n seen[n] = 1\n if any(dfs(nn) for nn in digraph.get(n, set())): return True \n seen[n] = 2\n return False", "def any_neighbors(nodelist, G):\n outcome = False\n #neighbors = P.neighbors(n)\n for i in range(len(nodelist)):\n for j in range(i+1, len(nodelist)):\n if G.has_edge(nodelist[i], nodelist[j]) or G.has_edge(nodelist[j], nodelist[i]):\n ##if nodelist[j] in G.neighbors(nodelist[i]):\n outcome = True\n return outcome\n return outcome" ]
[ "0.67384744", "0.6349388", "0.63051564", "0.6257954", "0.6152257", "0.6132436", "0.6108083", "0.60494477", "0.60445905", "0.59779936", "0.59365493", "0.5929931", "0.59287775", "0.5921574", "0.58920145", "0.58911324", "0.58680403", "0.5858796", "0.58010995", "0.57815397", "0.57688653", "0.5762758", "0.57621163", "0.57598424", "0.5753844", "0.5748808", "0.57353455", "0.5733997", "0.57097816", "0.5700974" ]
0.67146933
1
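A minimal usage sketch for the route-existence record above. `TeleportGraph` and `CityNode` are not shown in the dataset, so the classes below are hypothetical stand-ins that mirror only the attributes `does_route_exist` relies on (`find_city_node`, `name`, `related_cities`):

```python
from queue import Queue  # does_route_exist expects queue.Queue to be in scope

class CityNode:
    """Hypothetical node type: a city name plus a set of neighboring nodes."""
    def __init__(self, name):
        self.name = name
        self.related_cities = set()

class TeleportGraph:
    """Hypothetical undirected graph keyed by city name."""
    def __init__(self):
        self._nodes = {}

    def add_edge(self, a, b):
        na = self._nodes.setdefault(a, CityNode(a))
        nb = self._nodes.setdefault(b, CityNode(b))
        na.related_cities.add(nb)  # link both ways: the graph is undirected
        nb.related_cities.add(na)

    def find_city_node(self, name):
        return self._nodes[name]

g = TeleportGraph()
g.add_edge("Fortuna", "Hemingway")
g.add_edge("Hemingway", "Atlantis")
g.add_edge("Tustin", "San Diego")
print(does_route_exist(g, "Fortuna", "Atlantis"))   # True: two hops via Hemingway
print(does_route_exist(g, "Fortuna", "San Diego"))  # False: disconnected components
```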
Takes a list of words as input and returns a list of the n most frequently occurring words ordered from most to least frequently occurring.
def get_top_n_words(word_list, n):
    # Uses Counter to map each word to its number of occurrences
    wordCount = Counter(word_list)

    # Order the words from most to least frequently occurring
    orderedByFrequency = sorted(wordCount, key=wordCount.get, reverse=True)

    # Return the top 'n' words; slicing avoids an IndexError when n exceeds the vocabulary size
    return orderedByFrequency[:n]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_top_n_words(word_list, n):\n d = dict()\n for w in word_list:\n d[w] = d.get(w, 0) + 1\n ordered_by_frequency = sorted(d, key=d.get, reverse=True)\n return ordered_by_frequency[0:n]", "def get_top_n_words(word_list, n):\n word_counts = dict()\n\n for word in word_list:\n freq = word_counts.get(word, 1)\n word_counts[word] = freq + 1\n\n ordered_by_frequency = sorted(word_counts, key=word_counts.get, reverse=True)\n\n return ordered_by_frequency[0:n]", "def get_top_n_words(word_list, n):\n words = []\n\n # Change all words to lowercase\n for word in word_list:\n word = str.lower(word)\n if word not in words:\n words.append(word)\n\n # Calculate frequency of each word\n frequency = []\n for word in words:\n word_count = 0\n for test in word_list:\n if word == test:\n word_count += 1\n frequency.append(word_count)\n\n dic = dict()\n for i, word in enumerate(words):\n dic[frequency[i]] = word\n\n # Sort dictionary to return ranks\n keys = dic.keys()\n keys = sorted(keys)\n words_ranked = []\n for key in keys:\n words_ranked.append(dic.get(key))\n words_ranked = words_ranked[::-1]\n words_ranked = words_ranked[:n]\n return words_ranked", "def get_top_n_words(word_list, n):\n\tfreqs = get_word_frequencies(word_list)\n\tfreq_words = sorted(freqs, key=freqs.get, reverse=False)\n\treturn freq_words[:n]", "def get_top_n_words(word_list, n):\n\tfreq_dict = make_freq_dict (word_list) # get a dictionary\n\tordered_by_frequency = sorted(freq_dict, key=freq_dict.get, reverse=True) # sort\n\tprint ordered_by_frequency[0:n] # print\n\treturn ordered_by_frequency[0:n]", "def get_top_n_words(word_list, n):\n\tword_counts = dict()\n\tfor word in word_list:\n\t\tword_counts[word] = 1 + word_counts.get(word,0)\n\n\twords_list = word_counts\n\tsorted_list = sorted(words_list.items(), key = lambda x: x[1])\n\tfinal_list = []\n\n\ti = -1\n\twhile i > ((-1 * n) - 1):\n\t\tfinal_list.append(sorted_list[i])\n\t\ti -= 1\n\n\tlist_without_numbers = [x[0] for x in final_list]\n\n\treturn list_without_numbers", "def count_words(s, n):\r\n list_of_words=get_listOfWords(s)\r\n res=wrap_with_freq_toList(list_of_words)\r\n res=sortit(res)\r\n top_n=res[0:n]\r\n return top_n\r\n \r\n # TODO: Count the number of occurences of each word in s\r\n # TODO: Sort the occurences in descending order (alphabetically in case of ties)\r\n # TODO: Return the top n words as a list of tuples (<word>, <count>)\r", "def get_10_most_frequent_words(tokens):\n\n return FreqDist(word.lower() for word in tokens).most_common(10)", "def most_common_words(counts, n=-1):\n\n result = sorted(list(counts.items()), key=lambda x: x[1], reverse=True)\n\n if n == -1:\n return result\n else:\n return result[:n]", "def top_k_frequent(top_k, words, list_of_texts):\n dict_top_freq = {}\n for word in words:\n dict_top_freq[word.lower()] = 0\n for string in list_of_texts:\n if word.lower() in string.lower():\n counter = string.lower().count(word.lower())\n dict_top_freq[word.lower()] += counter\n\n list_top_sorted = sorted(dict_top_freq.items(), key=lambda item: item[1], reverse=True)\n print(list_top_sorted)\n\n list_k = []\n for i in list_top_sorted:\n list_k.append(i[0])\n\n return list_k[:top_k]", "def find_frequent_words(words, most_frequent): \n \n # common_words = Counter(sorted(words))\n # print common_words\n common_words = Counter(sorted(words)).most_common(most_frequent)\n print (common_words )\n most_common_words = [w for w, w_count in common_words]\n return most_common_words", "def _top_n_words(n, f_name):\n word_dict, idx_dict, word_cnt = 
_extract_words(f_name)\n print (\"number of words: %d\" % len(word_cnt))\n n = min(len(word_cnt), n)\n np_cnt = np.array(word_cnt)\n idx = np.argpartition(np_cnt, -n)[-n:]\n res = []\n for i in idx:\n res.append((idx_dict[i], np_cnt[i]))\n res.sort(key=lambda t: t[1], reverse=True)\n return res", "def calculate_most_frequent_n_words(self, input_string: str, n: int) \\\n -> List[WordFrequencyStructure]:\n results = \\\n self._typed_sorted_result(input_string=input_string)\n\n return results[:n]", "def get_top_n_words(column, n):\r\n frequencies = Counter()\r\n column.str.lower().str.split().apply(frequencies.update)\r\n return frequencies.most_common(n)", "def most_common(filename,n):\n\tfreq_dict = dictionary_creation(filename)\n\tt = []\n\tfor key, value in freq_dict.items():\n\t\tt.append((value,key))\n\t\tt.sort(reverse=True)\n\twordlist = []\n\tfreqlist = []\n\tprint n, 'most common words:'\n\tfor freq,word in t[0:n]:\n\t\tprint word,'\\t', freq\n\t\twordlist.append(word)\n\t\tfreqlist.append(freq)\n\treturn wordlist,freqlist", "def count_words(s, n):\n\n # TODO: Count the number of occurences of each word in s\n words = s.lower().split()\n dict = {}\n\n for item in words:\n dict[item] = words.count(item)\n\n # TODO: Sort the occurences in descending order (alphabetically in case of ties)\n items = dict.items()\n\n items.sort(key=lambda tup: tup[0])\n items.sort(key=lambda tup: tup[1], reverse=True)\n\n # TODO: Return the top n words as a list of tuples (<word>, <count>)\n return items[:n]", "def display_top_n_words(total_count__of_words, n): # Considering n=10 here as specified in the requirements\n return sorted(total_count__of_words.items(), key=lambda i: i[1], reverse=True)[:n]", "def filter_top_n_words(topic_words_dict, n, word_list):\n # First remove any redundant words in word_list\n words = set(word_list)\n # Now get the intersection with words, that appear as keys in the dict\n topic_words_intersect = set(topic_words_dict.keys()).intersection(words)\n # Now get the words with their scores, sort descending for the scores\n # and return the first n words:\n score_wordlist = [(x, topic_words_dict[x]) for x in topic_words_intersect]\n score_wordlist.sort(key=lambda x: x[1], reverse=True)\n return [word for (word,score) in score_wordlist[:n]]", "def get_mostFrequent(self, n=5):\r\n pass", "def get_mostFrequent(self, n=5):\r\n pass", "def find_frequent_words(word_frequencies, amount=50):\n alphabetically_sorted = sorted(word_frequencies.most_common(amount), key=lambda tup: tup[0])\n final_sorted = sorted(alphabetically_sorted, key=lambda tup: tup[1], reverse=True)\n list1 = [i[0] for i in final_sorted]\n\n list2 = [i[1] for i in final_sorted]\n return list1, list2", "def most_common_words(n):\n with open(os.path.join('visualization', 'vocab.tsv')) as fd:\n words = fd.readlines()[:n]\n words = [word for word in words]\n save_path = os.path.join('visualization', 'vocab_' + str(n) + '.tsv')\n with open(save_path, 'w') as fd:\n for word in words:\n fd.write(word)", "def get_top_n_words(topic_words_dict, n):\n score_wordlist = topic_words_dict.items()\n score_wordlist.sort(key=lambda x: x[1], reverse=True)\n return [word for (word,score) in score_wordlist[:n]]", "def get_top_n_words(filename, n, to_search_word_or_not, word_to_serach, get_random):\n\n histogram = get_word_list(filename, True) #calls histogram file\n output = []\n for word,value in histogram.items(): #sorts words into new histogram that has value, word pairs to sort\n output.append((value,word))\n output.sort()\n 
output.reverse() #sorting from greatest to least\n final_n_output = []\n\n if get_random == True: #possibly sending getrandom funtion to get random words\n random_word = getrandom(histogram)\n else:\n random_word = None\n\n if to_search_word_or_not == True: #possibly sending getrandom funtion to get random words\n num_of_word = search_for_a_word(histogram, word_to_serach)\n else:\n num_of_word = None\n\n for i in range(n):\n final_n_output.append(output[i]) #making a final output list\n\n print(random_word)\n\n return final_n_output, num_of_word, random_word", "def find_the_frequency(the_list):\n\n freq = {}\n\n for word in the_list:\n if word not in freq:\n freq[word] = 1\n else:\n freq[word] += 1\n\n freq_list = sorted(freq.items(), key=lambda x: x[1], reverse=True)\n return freq_list", "def get_word_list_with_freq_at_least_n(text, n = 2):\n word_freq_dists = get_freq_dist_from_corpus(text)\n selected_word_list = [word for word in word_freq_dists.keys() if word_freq_dists.get(word) >= n]\n return selected_word_list", "def count_words(s, n):\n \n strList = s.split(' ');#to split the string into a list of words\n rList = [];#to store the each word only once\n nList = [];#to store how many times each word has occured\n for i in range(len(strList)):\n if ((strList[i] in rList)==False):\n rList.append(strList[i]);\n nList.append(int(1));\n else:\n for j in range(len(rList)):\n if (strList[i]==rList[j]):\n nList[j]=nList[j]+1;\n \n tList = list();#a new empty tuple list\n for i in range(len(rList)):\n tList.append((rList[i],nList[i]));#construct the tuple list from rList and nList\n \n tList.sort(key=lambda tList: (-tList[1], tList[0]));#sort the tuple list: first by its 2nd element in reverse order \"-\", then sort by its 1st element in non-reverse order, no \"-\"\n \n # for testing\n #for i in tList:\n # print i;\n \n \n # TODO: Count the number of occurences of each word in s\n \n # TODO: Sort the occurences in descending order (alphabetically in case of ties)\n \n # TODO: Return the top n words as a list of tuples (<word>, <count>)\n \n return tList[:n] #return the first n pairs of elements as required", "def freq_el(word_list, i):\n\n #get word counts\n word_counts = Counter(word_list)\n # word_counts = defaultdict(int)\n # for word in word_list:\n # word_counts[word] += 1\n\n #Make dictionary with count as key, words with same count as a value in a list\n byfreq = defaultdict(list)\n for k, v in word_counts.items():\n byfreq[v].append(k)\n\n #sort dictionary by key in decending order, return k items using slicing\n ret = []\n keys = sorted(byfreq, reverse = True)\n for v in keys:\n ret.extend(sorted(byfreq[v]))\n return ret[:i]", "def get_frequency_based_priors(n_common=3000, width_under_sigmoid=10):\n freq_map = get_word_frequencies()\n words = np.array(list(freq_map.keys()))\n freqs = np.array([freq_map[w] for w in words])\n arg_sort = freqs.argsort()\n sorted_words = words[arg_sort]\n\n # We want to imagine taking this sorted list, and putting it on a number\n # line so that it's length is 10, situating it so that the n_common most common\n # words are positive, then applying a sigmoid\n x_width = width_under_sigmoid\n c = x_width * (-0.5 + n_common / len(words))\n xs = np.linspace(c - x_width / 2, c + x_width / 2, len(words))\n priors = dict()\n for word, x in zip(sorted_words, xs):\n priors[word] = sigmoid(x)\n return priors", "def get_vocabulary_words_with_counts(txt, min_word_freq):\n\n data = txt.split()\n counter = collections.Counter(data)\n count_pairs = 
sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n # keep words that occur more than min_word_freq\n top_count_pairs = [pair for pair in count_pairs if pair[1] > min_word_freq]\n return top_count_pairs" ]
[ "0.83019364", "0.8259304", "0.82464784", "0.8063228", "0.8041614", "0.7969055", "0.79085237", "0.78822184", "0.77217156", "0.7661636", "0.7626515", "0.75896144", "0.755189", "0.7525962", "0.748399", "0.7470999", "0.7364696", "0.71175003", "0.70950264", "0.70950264", "0.7066404", "0.7045714", "0.7043797", "0.7035046", "0.70343494", "0.7009562", "0.6967558", "0.69575953", "0.69496787", "0.69012165" ]
0.8294504
1
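A quick usage check for the word-frequency record above; `get_top_n_words` assumes `Counter` has been imported from `collections`:

```python
from collections import Counter  # required by get_top_n_words

words = ["cat", "dog", "cat", "bird", "dog", "cat"]
print(get_top_n_words(words, 2))  # ['cat', 'dog'] - most to least frequent
```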
Convert a hex color range to RGBA.
def hex2rgba(colors):
    # Accept a single hex string as well as a list/array of hex strings
    if isinstance(colors, str):
        colors = np.array([colors])

    rgbcolors = list(map(lambda x: matplotlib.colors.to_rgba(x), colors))
    return np.array(rgbcolors)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hex_to_rgba(h, alpha):\n return tuple([int(h.lstrip('#')[i:i + 2], 16) for i in (0, 2, 4)] + [alpha])", "def normalize_rgb_colors_to_hex(css):\n log.debug(\"Converting all rgba to hexadecimal color values.\")\n regex = re.compile(r\"rgb\\s*\\(\\s*([0-9,\\s]+)\\s*\\)\")\n match = regex.search(css)\n while match:\n colors = map(lambda s: s.strip(), match.group(1).split(\",\"))\n hexcolor = '#%.2x%.2x%.2x' % tuple(map(int, colors))\n css = css.replace(match.group(), hexcolor)\n match = regex.search(css)\n return css", "def hex2rgb(hexcode):\n\treturn tuple(map(ord, hexcode[1:].decode('hex')))", "def hex2rgb(cls, hex):\r\n valid_char = '#1234567890abcdef'\r\n conditions = (hex[0] == '#',\r\n len(hex) == 7,\r\n all(c in valid_char for c in hex))\r\n if not all(conditions):\r\n raise ValueError\r\n return int(hex[1:3], 16), int(hex[3:5], 16), int(hex[5:], 16)", "def hex2rgb( hex ):\n\n hex = hex.lstrip( '#' )\n hlen = len( hex )\n hlen3 = int( hlen / 3 )\n\n return np.asarray( tuple(\n int( hex[ i : i + hlen3 ], 16 ) / 255. for i in range( 0, hlen, hlen3 ) ) )", "def hex_to_rgb(self,value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))", "def HexToRGB(hex_str):\r\n hexval = hex_str\r\n if hexval[0] == u\"#\":\r\n hexval = hexval[1:]\r\n ldiff = 6 - len(hexval)\r\n hexval += ldiff * u\"0\"\r\n # Convert hex values to integer\r\n red = int(hexval[0:2], 16)\r\n green = int(hexval[2:4], 16)\r\n blue = int(hexval[4:], 16)\r\n return [red, green, blue]", "def hex_to_rgb(hex_val):\n h_len = len(hex_val)\n tupl = tuple(int(hex_val[i:i + h_len // 3], 16) for i in range(0, h_len, h_len // 3))\n final = ','.join(map(str, tupl))\n return final", "def hex_to_rgb(value):\r\n lv = len(value)\r\n out = tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))\r\n out = tuple([x/256.0 for x in out])\r\n return out", "def to_rgba_source(rgba):\n color_string = rgba.lstrip('#')\n color = tuple(\n float(int(color_string[i:i+2], 16)) / 255.0\n for i in (0, 2, 4, 6)\n )\n return color", "def RGB_hex_to_color(text: str, a: int=255):\n num = int(text, 16)\n r = num // 65536\n g = (num - r * 65536) // 256\n b = num - r * 65536 - g * 256\n return np.clip(np.array([r, g, b, a], dtype='u1'), 0, 255)", "def hex2rgb(hx):\n if hx[0] == '#':\n return int(hx[1:3], 16), int(hx[3:5], 16), int(hx[5:7], 16)\n else:\n return int(hx[0:2], 16), int(hx[2:4], 16), int(hx[4:6], 16)", "def _hex2rgb(c_hex):\n # Pass 16 to the integer function for change of base\n return [int(c_hex[i:i + 2], 16) for i in range(1, 6, 2)]", "def hexToRed(self, hex):\n return int(hex[0:2], 16)", "def hex_to_RGB(hex_code: str) -> list:\n\n hex_code = hex_code.lstrip('#')\n return [int(hex_code[i:i + 2], 16) for i in (0, 2, 4)]", "def hex_to_rgb(value):\n value = value.lstrip('#')\n hex_total_length = len(value)\n rgb_section_length = hex_total_length // 3\n return tuple(int(value[i:i + rgb_section_length], 16)\n for i in range(0, hex_total_length, rgb_section_length))", "def hex_to_rgb(cls, hex_value):\n hex_value = hex_value.lstrip(\"#\")\n r,g,b = tuple(int(hex_value[i:i+2], 16) for i in (0, 2 ,4))\n return (r,g,b)", "def hex2rgb(hexcolors):\n hexcolors = toiter(hexcolors)\n rgb = []\n for s in hexcolors:\n s = s[len(s)-6:len(s)] # get last 6 characters\n r, g, b = s[0:2], s[2:4], s[4:6]\n r, g, b = int(r, base=16), int(g, base=16), int(b, base=16)\n rgb.append((r, g, b))\n return np.uint8(rgb)", "def hex2rgb(colors):\n if 'str' in str(type(colors)):\n colors = 
np.array([colors])\n\n rgbcolors = list(map(lambda x: matplotlib.colors.to_rgb(x), colors))\n return np.array(rgbcolors)", "def hex2color(s):\n hexColorPattern = re.compile(\"\\A#[a-fA-F0-9]{6}\\Z\")\n if not isinstance(s, basestring):\n raise TypeError('hex2color requires a string argument')\n if hexColorPattern.match(s) is None:\n raise ValueError('invalid hex color string \"%s\"' % s)\n return tuple([int(n, 16)/255.0 for n in (s[1:3], s[3:5], s[5:7])])", "def hex_to_rgb(value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))", "def hex_to_rgb(value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))", "def hex_to_rgb(value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))", "def hex_to_rgb(value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))", "def hex_to_rgb(self, hex_r, hex_g, hex_b):\n r = hex_r / 255.0\n g = hex_g / 255.0\n b = hex_b / 255.0\n return (r, g, b)", "def hex_to_rgb(hexa):\n return tuple(int(hexa[i:i+2], 16) for i in (0, 2, 4))", "def hex_to_rgb(hex):\n hex = hex.lstrip('#')\n hlen = len(hex)\n return tuple(int(hex[i:i + hlen // 3], 16) for i in range(0, hlen, hlen // 3))", "def _rgb_to_rgba(self, r, g, b):\n # r, g, b must be out of 255\n h, s, v = colorsys.rgb_to_hsv(r, g, b)\n r, g, b = colorsys.hsv_to_rgb(h, s, max(v, 240))\n\n return [r / 255, g / 255, b / 255, min(v + 20, 255) / 255]", "def convert_hex_to_rgb(hex_string):\n hex_string = hex_string.lstrip('#')\n return [int(hex_string[i:i + 2], 16) / 255.0 for i in (0, 2, 4)]", "def rgba_hex_str(self, x):\n return \"#%02x%02x%02x%02x\" % self.rgba_bytes_tuple(x)" ]
[ "0.74167764", "0.70776886", "0.6812988", "0.6806883", "0.66850585", "0.66562873", "0.66246355", "0.66192836", "0.6602443", "0.6597313", "0.6596636", "0.65546227", "0.65298957", "0.6514845", "0.6510283", "0.65088314", "0.64748806", "0.6454721", "0.6445255", "0.64353675", "0.6403181", "0.6403181", "0.6403181", "0.6403181", "0.6389123", "0.6374385", "0.6350232", "0.6342968", "0.6342923", "0.6313483" ]
0.71970624
1
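A small sketch exercising the hex-to-RGBA record above; the snippet assumes `numpy as np` and `matplotlib.colors` are imported at module level:

```python
import numpy as np
import matplotlib.colors  # hex2rgba calls matplotlib.colors.to_rgba

print(hex2rgba('#FF0000'))  # [[1. 0. 0. 1.]] - red with full alpha
print(hex2rgba(np.array(['#FF0000', '#00FF00'])).shape)  # (2, 4): one RGBA row per color
```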
Generate colors from an input list. This function creates unique colors based on the input list y and the cmap. When a gradient hex color is defined, such as '#000000', a gradient coloring space is created between two colors: the start color for a particular y is derived from the cmap, and the end color is the defined gradient color, such as '#000000'.
def fromlist(y, X=None, cmap='Set1', gradient=None, method='matplotlib', scheme='rgb', opaque_type='per_class', verbose='info'): # Set the logger set_logger(verbose=verbose) # make unique y = np.array(y) uiy = np.unique(y) # Get colors colors_unique = generate(len(uiy), cmap=cmap, method=method, scheme=scheme, verbose=verbose) # Make dict for each search colordict = dict(zip(uiy, colors_unique)) # Create opaque levels opaque = np.array([1.0] * len(y)) # Color using density and the gradient. if gradient is not None: # Set the scheme if scheme=='rgb': colors = np.array([[0.0, 0.0, 0.0]] * len(y)) else: colors = np.array(['#000000'] * len(y)) # Make the colors based on the density for i, _ in enumerate(uiy): Iloc = uiy[i]==y if scheme=='rgb': # Set the rgb colors c_gradient = linear_gradient(_rgb2hex(colordict.get(uiy[i]) * 255), finish_hex=gradient, n=sum(Iloc)) colors[Iloc] = c_gradient['rgb'] / 255 opaque[Iloc] = c_gradient['opaque'] else: # Set the hex colors c_gradient = linear_gradient(colordict.get(uiy[i]), finish_hex=gradient, n=sum(Iloc)) colors[Iloc] = np.array(c_gradient['hex']) opaque[Iloc] = c_gradient['opaque'] else: # Get colors for y colors = list(map(colordict.get, y)) # Stack list of arrays into single array if scheme=='rgb': colors = np.vstack(colors) else: colors = np.array(colors) # Add a 4th column with the transparency level. if scheme=='rgb': logger.info('Add transparency to RGB colors (last column)') colors = np.c_[colors, opaque] # Add gradient for each class if (X is not None) and X.shape[0]==len(y): colors = gradient_on_density_color(X, colors, y, opaque_type=opaque_type, verbose=verbose) # Return return colors, colordict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_color_gradient():\n colors = []\n step = 10\n for red, green in zip(range(255,-step, -step), range(0, 255, step)):\n colors.append({'red': red, 'green': green, 'blue': 0})\n for green, blue in zip(range(255,-step, -step), range(0, 255, step)):\n colors.append({'red': 0, 'green': green, 'blue': blue})\n for blue, red in zip(range(255,-step, -step), range(0, 255, step)):\n colors.append({'red': red, 'green': 0, 'blue': blue})\n return colors", "def get_colors(n, cmap=\"viridis\", start=0.0, stop=1.0, alpha=1.0, return_hex=False):\n colors = [cm.get_cmap(cmap)(x) for x in np.linspace(start, stop, n)]\n colors = [(r, g, b, alpha) for r, g, b, _ in colors]\n if return_hex:\n colors = _rgb_color_list_to_hex(colors)\n return colors", "def _generate_colors(\n self, x: NDArrayFloat\n ) -> Sequence[RGBHexColor | None]:\n x = np.asarray(x)\n idx = np.round((x * 255) + ROUNDING_JITTER).astype(int)\n arr = np.column_stack(\n [self._r_lookup[idx], self._g_lookup[idx], self._b_lookup[idx]]\n )\n return [rgb_to_hex(c) for c in arr]", "def _create_colormap(self):\n\n max_count = max(self.data['rects'], key=lambda r: r['count'])['count']\n ranges_list = [\n [\n int(max_count/x[0])+1,\n int(max_count/x[1])+1\n ] for x in [[6, 3], [3, 2], [2, 1]]\n ]\n ranges_list = [[0, 1]] + [[1, ranges_list[0][0]]] + ranges_list\n\n self.data['colormap'] = [\n {\n 'fill': self._get_fill(\n i,\n range(\n ranges_list[i][0],\n ranges_list[i][1]\n )\n ),\n 'range': range(\n ranges_list[i][0],\n ranges_list[i][1]\n )\n } for i in range(0, 5)\n ]", "def create_colors_list(color_dict):\r\n ret = []\r\n for i in range(len(color_dict)):\r\n ret.append('#' + color_dict[i]['@rgb'])\r\n return ret", "def color_group(max_range):\n\n color = []\n\n for _ in range(0, max_range):\n col = []\n col.append(random.random() % 1)\n col.append(random.random() % 1)\n col.append(random.random() % 1)\n color.append(col)\n\n dist_table = []\n\n for idx in range(0, max_range):\n dist_table.append([color_distance(color[idx], x) for x in color[:]])\n\n for _ in range(0, 50):\n for idx_start in range(0, max_range):\n global_point_distance = sum(dist_table[idx_start])\n tmp_dist_table = dist_table[idx_start][:]\n tmp_table = color[:]\n for idx_end in range(0, max_range):\n tmp_table[idx_end] = mutate_color(color[idx_end])\n tmp_dist_table[idx_end] = color_distance(\n color[idx_start],\n color[idx_end])\n if sum(tmp_dist_table) > global_point_distance:\n dist_table[idx_start] = tmp_dist_table[:]\n color = tmp_table[:]\n\n #for index in range(0, len(color)):\n # color[index] = hls_to_rgb(\n # color[index][0],\n # color[index][1],\n # color[index][2])\n\n return color", "def linear_gradient(start_hex, finish_hex=\"#FFFFFF\", n=10):\n if finish_hex=='opaque': finish_hex=start_hex\n # Starting and ending colors in RGB form\n s = _hex2rgb(start_hex)\n f = _hex2rgb(finish_hex)\n # Initilize a list of the output colors with the starting color\n RGB_list = [s]\n # Calcuate a color at each evenly spaced value of t from 1 to n\n for t in range(1, n):\n # Interpolate RGB vector for color at the current value of t\n curr_vector = [\n int(s[j] + (float(t) / (n - 1)) * (f[j] - s[j]))\n for j in range(3)\n ]\n # Add it to our list of output colors\n RGB_list.append(curr_vector)\n\n # convert to dict\n coldict = _color_dict(RGB_list)\n coldict['opaque'] = _incremental_steps(1, 0, len(RGB_list))\n # return\n return coldict", "def gradient_cmap(gcolors, nsteps=256, bounds=None):\n from matplotlib.colors import LinearSegmentedColormap\n\n ncolors = 
len(gcolors)\n if bounds is None:\n bounds = np.linspace(0, 1, ncolors)\n\n reds = []\n greens = []\n blues = []\n alphas = []\n for b, c in zip(bounds, gcolors):\n reds.append((b, c[0], c[0]))\n greens.append((b, c[1], c[1]))\n blues.append((b, c[2], c[2]))\n alphas.append((b, c[3], c[3]) if len(c) == 4 else (b, 1.0, 1.0))\n\n cdict = {\n \"red\": tuple(reds),\n \"green\": tuple(greens),\n \"blue\": tuple(blues),\n \"alpha\": tuple(alphas),\n }\n\n cmap = LinearSegmentedColormap(\"grad_colormap\", cdict, nsteps)\n return cmap", "def Gradient( x=np.linspace(0,1,2), min=None, max=None ):\n if min is None:\n min = x.min()\n if max is None:\n max = x.max()-min\n else:\n max -= min\n x_ = x - min\n x_ /= max\n return cmap_gradient( x_ )", "def create_cycler_colors(color_scheme):\n cmap = cm.get_cmap(color_scheme) # PiYG\n cycler_colors = []\n\n for i in range(cmap.N):\n rgba = cmap(i)\n # rgb2hex accepts rgb or rgba\n cycler_colors.append(matplotlib.colors.rgb2hex(rgba)) \n \n return cycler_colors", "def get_colors(self):\n colors = [\"#244486\", \"#A6A6A6\", \"#B12122\"]\n cmap = LinearSegmentedColormap.from_list(\"mycmap\", colors)\n\n color_palette=[cmap(i) for i in np.linspace(0, 1, len(set(self.nodes_list)))]\n return dict(zip(list(set(self.nodes_list)), color_palette))", "def colors_to_cmap(colors):\n colors = np.asarray(colors)\n if colors.shape[1] == 3:\n colors = np.hstack((colors, np.ones((len(colors),1))))\n steps = (0.5 + np.asarray(range(len(colors)-1), dtype=np.float))/(len(colors) - 1)\n return matplotlib.colors.LinearSegmentedColormap(\n 'auto_cmap',\n {clrname: ([(0, col[0], col[0])] + \n [(step, c0, c1) for (step,c0,c1) in zip(steps, col[:-1], col[1:])] + \n [(1, col[-1], col[-1])])\n for (clridx,clrname) in enumerate(['red', 'green', 'blue', 'alpha'])\n for col in [colors[:,clridx]]},\n N=len(colors))", "def gen_colors(nbeads):\n\n # use colormap (blue, red, green)\n # blue 0.0, 0.0, 1.0\n # red: 1.0, 0.0, 0.0\n # green: 0.0, 1.0, 1.0\n\n my_cmap = cm.get_cmap('jet')\n minval = 0\n maxval = nbeads - 1\n norm = mplcolors.Normalize(minval, maxval)\n \n sphere_rgbcolor = []\n for i in range(nbeads):\n si = my_cmap(norm(i))\n sphere_rgbcolor.append([si[0], si[1], si[2]])\n\n return sphere_rgbcolor", "def gradient(text, start_color, end_color):\n length = len(text)\n\n # the delta of each color\n delta_r = start_color.r - end_color.r\n delta_g = start_color.g - end_color.g\n delta_b = start_color.b - end_color.b\n\n # the \"step\" sizes for each of the colors\n step_r = delta_r / length\n step_g = delta_g / length\n step_b = delta_b / length\n \n result = \"\"\n current_color = start_color\n\n for character in text:\n result += \"%s%s\" % (current_color, character)\n current_color -= Color(step_r, step_g, step_b)\n\n return \"%s%s\" % (result, RESET)", "def create_colormap(color_list: Sequence[str], n_colors: int) -> NDArrayFloat:\n cmap = LinearSegmentedColormap.from_list(name=\"dummy_name\", colors=color_list)\n colorscale: NDArrayFloat = np.array(\n [cmap(k * 1 / n_colors) for k in range(n_colors)]\n )\n # ignore the 4th alpha channel\n return colorscale[:, :3]", "def _get_colors(num_colors):\n cmap = plt.get_cmap()\n return [cmap(1. 
* i / num_colors) for i in range(num_colors)]", "def compile_palette(palette_list):\n global _COMPILED_PALETTE\n _COMPILED_PALETTE = {}\n\n for color in palette_list:\n r_sum = math.fabs(color[0] ** _MAGNITUDE)\n g_sum = math.fabs(color[1] ** _MAGNITUDE)\n b_sum = math.fabs(color[2] ** _MAGNITUDE)\n\n _COMPILED_PALETTE[color] = [r_sum, g_sum, b_sum]", "def set_color_list(self, new_list):\n self.__clr_list = itertools.cycle(new_list)", "def get_color_codes(y):\n y = column_or_1d(y)\n\n # inliers are assigned blue\n c = np.full([len(y)], 'b', dtype=str)\n outliers_ind = np.where(y == 1)\n\n # outlier are assigned red\n c[outliers_ind] = 'r'\n\n return c", "def colorToBeads(clidx, filament_list, color, part_list_x, part_list_y, part_list_phi):\n cl_beads_x = []\n cl_beads_y = []\n cl_beads_p = []\n for i in filament_list:\n for j in range(N):\n ind = int(i*N+j)\n clidx[ind] = color\n cl_beads_x.append(part_list_x[ind])\n cl_beads_y.append(part_list_y[ind])\n cl_beads_p.append(part_list_phi[ind])\n \n xmin = min(cl_beads_x)\n ymin = min(cl_beads_y)\n xmax = max(cl_beads_x)\n ymax = max(cl_beads_y)\n \n flag = False\n x_span = xmax-xmin\n y_span = ymax-ymin\n if x_span > Lx/2:\n flag = True\n elif y_span > Ly/2:\n flag = True\n \n return cl_beads_x, cl_beads_y, cl_beads_p, xmin, xmax, ymin, ymax, flag", "def make_colormap(colors):\n#-------------------------\n from matplotlib.colors import LinearSegmentedColormap, ColorConverter\n from numpy import sort\n z = sort(colors.keys())\n n = len(z)\n z1 = min(z)\n zn = max(z)\n x0 = (z - z1) / (zn - z1)\n CC = ColorConverter()\n R = []\n G = []\n B = []\n for i in range(n):\n #i'th color at level z[i]:\n Ci = colors[z[i]]\n if type(Ci) == str:\n # a hex string of form '#ff0000' for example (for red)\n RGB = CC.to_rgb(Ci)\n else:\n # assume it's an RGB triple already:\n RGB = Ci\n R.append(RGB[0])\n G.append(RGB[1])\n B.append(RGB[2])\n cmap_dict = {}\n cmap_dict['red'] = [(x0[i],R[i],R[i]) for i in range(len(R))]\n cmap_dict['green'] = [(x0[i],G[i],G[i]) for i in range(len(G))]\n cmap_dict['blue'] = [(x0[i],B[i],B[i]) for i in range(len(B))]\n mymap = LinearSegmentedColormap('mymap',cmap_dict)\n return mymap", "def get_colors(self):\n x = np.linspace(0, 1, self.length)\n y = x**self.gamma\n\n value = np.linspace(0, 1, len(self.colors))\n r = np.interp(y, value, self.colors[:,0])\n g = np.interp(y, value, self.colors[:,1])\n b = np.interp(y, value, self.colors[:,2])\n\n return np.dstack((r, g, b)).reshape(len(r), 3).astype(np.uint8)", "def color(self, data):\n\n red = np.interp(data, self.range, self.r)\n blue = np.interp(data, self.range, self.b)\n green = np.interp(data, self.range, self.g)\n # Style plot to return a grey color when value is 'nan'\n red[np.isnan(red)] = 240\n blue[np.isnan(blue)] = 240\n green[np.isnan(green)] = 240\n colors = np.dstack([red.astype(np.uint8),\n green.astype(np.uint8),\n blue.astype(np.uint8),\n np.full_like(data, 255, dtype=np.uint8)])\n #return colors.view(dtype=np.uint32).reshape(data.shape)\n c=[]\n for i in range(len(data)):\n c.append([red[i],green[i],blue[i]])\n return c", "def linear_gradient(value, start, end, colour_list=None):\n\n # Translate the end colour to RGB arrays if necessary.\n if isinstance(start, str):\n # Default (search the molmol list then the X11 list).\n if colour_list == None:\n try:\n start = molmol_colours(start)\n except:\n start = x11_colours(start)\n\n # Molmol colours.\n elif colour_list == 'molmol':\n start = molmol_colours(start)\n\n # X11 colours.\n elif colour_list == 'x11':\n 
start = x11_colours(start)\n\n # Translate the end colour to RGB arrays if necessary.\n if isinstance(end, str):\n # Default (search the molmol list then the X11 list).\n if colour_list == None:\n try:\n end = molmol_colours(end)\n except:\n end = x11_colours(end)\n\n # Molmol colours.\n elif colour_list == 'molmol':\n end = molmol_colours(end)\n\n # X11 colours.\n elif colour_list == 'x11':\n end = x11_colours(end)\n\n # Truncate the value to be between zero and one.\n if value < 0.0:\n value = 0.0\n elif value > 1.0:\n value = 1.0\n\n # The position on the linear gradient.\n return value * (end - start) + start", "def monochrome_palette(basecolor, nstops, s_range=(1, 0.3), v_range=(1, 1.3), absolute=False):\n def clip(val, varname):\n if val < 0:\n val = 0\n logger.warning(\"[monochrome_palette]: \" + varname +\n \" was smaller than 0 and was clipped.\")\n elif val > 1:\n val = 1\n logger.warning(\"[monochrome_palette]: \" + varname +\n \" was greater than 1 and was clipped.\")\n return val\n\n if isinstance(basecolor, tuple):\n if any( v>1 for v in basecolor ):\n raise ValueError(\"If you are defining the basecolor by an \"\n \"RGB tuple, the values must be between 0 and 1. \"\n \"Specified basecolor: {}.\".format(str(basecolor)))\n basergb = mpl.colors.to_rgb(basecolor)\n h, s, v = mpl.colors.rgb_to_hsv(basergb)\n if absolute:\n s = 1; v = 1\n s_range = (clip(s_range[0] * s, 'saturation'), clip(s_range[1] * s, 'saturation'))\n v_range = (clip(v_range[0] * v, 'value'), clip(v_range[1] * v, 'value'))\n\n slist = [a*s for a in np.linspace(s_range[0], s_range[1], nstops)]\n vlist = [a*v for a in np.linspace(v_range[0], v_range[1], nstops)]\n clist = [mpl.colors.to_hex(mpl.colors.hsv_to_rgb((h, s_el, v_el)))\n for s_el, v_el in zip(slist, vlist)]\n return clist", "def create_funky_cmap(n_colors):\n\n colors = []\n for i in np.arange(0., 360., 360. / n_colors):\n h = i / 360.\n l = (50 + np.random.rand() * 10) / 100.\n s = (90 + np.random.rand() * 10) / 100.\n colors.append(hls_to_rgb(h, l, s))\n\n return colors", "def getColorMap(colors):\n # Normalise RGBs\n norm_colors = []\n for color in colors:\n norm_colors.append([val / 255. for val in color])\n # create color map\n cmap = cols.ListedColormap(norm_colors)\n\n return cmap", "def make_color_dict(start_name, start_hsv, end_name, end_hsv, n):\r\n colors = linear_gradient(start_hsv, end_hsv, n)\r\n names = ['%sto%s%s_%s' % (start_name, end_name, n, i) for i in range(n)]\r\n return dict(zip(names, colors))", "def get_colors(color_map, count):\n cols = plt.get_cmap(color_map, count + 1) # +1 to prevent wrapping, where col 0 is same as col -1\n cols = cols(range(count + 1)).tolist() # Create a list of colours\n return cols[:-1] # Remove overlapping colour and return", "def _linear_cmap(a, b):\n a = matplotlib.colors.colorConverter.to_rgb(a)\n b = matplotlib.colors.colorConverter.to_rgb(b)\n a_linear = _gamma_expand(a)\n b_linear = _gamma_expand(b)\n color_diff = a_linear - b_linear\n palette = (np.linspace(0, 1, 256).reshape((-1, 1))\n * color_diff.reshape((1, -1)))\n palette += b_linear\n palette = _gamma_compress(palette)\n return matplotlib.colors.ListedColormap(palette)" ]
[ "0.69815975", "0.6241875", "0.608826", "0.6028528", "0.5951059", "0.58788097", "0.58780885", "0.58316827", "0.5818387", "0.5781078", "0.56648606", "0.5645055", "0.5637384", "0.56352425", "0.56185704", "0.55861217", "0.5581655", "0.5577476", "0.5556196", "0.5546131", "0.5544579", "0.55366987", "0.5520505", "0.55107594", "0.55081683", "0.55052733", "0.54965675", "0.54909843", "0.5489037", "0.54789" ]
0.7146094
0
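A hedged usage sketch for the `fromlist` record above. The function leans on several module-level helpers that are not shown here (`generate`, `linear_gradient`, `gradient_on_density_color`, `set_logger`, `logger`), so this only illustrates the simple no-gradient path, and the exact hex values depend on the cmap:

```python
y = ['class_a', 'class_a', 'class_b', 'class_b', 'class_b']
colors, colordict = fromlist(y, cmap='Set1', scheme='hex')
print(colordict)     # e.g. {'class_a': '#e41a1c', 'class_b': '#377eb8'} (cmap-dependent)
print(colors.shape)  # (5,): one hex color per input label
```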
Return a gradient list of (n) colors between two hex colors. start_hex and finish_hex should be the full six-digit color string, including the number sign ("#FFFFFF").
def linear_gradient(start_hex, finish_hex="#FFFFFF", n=10):
    if finish_hex == 'opaque': finish_hex = start_hex
    # Starting and ending colors in RGB form
    s = _hex2rgb(start_hex)
    f = _hex2rgb(finish_hex)
    # Initialize a list of the output colors with the starting color
    RGB_list = [s]
    # Calculate a color at each evenly spaced value of t from 1 to n
    for t in range(1, n):
        # Interpolate RGB vector for color at the current value of t
        curr_vector = [
            int(s[j] + (float(t) / (n - 1)) * (f[j] - s[j]))
            for j in range(3)
        ]
        # Add it to our list of output colors
        RGB_list.append(curr_vector)

    # convert to dict
    coldict = _color_dict(RGB_list)
    coldict['opaque'] = _incremental_steps(1, 0, len(RGB_list))
    # return
    return coldict
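A hedged usage sketch for `linear_gradient`; `_hex2rgb`, `_color_dict`, and `_incremental_steps` are private helpers from the same module, assumed to behave as their names suggest (`_color_dict` returning at least a 'hex' key):

```python
grad = linear_gradient('#FF0000', '#0000FF', n=3)
print(grad['hex'])     # expected ['#ff0000', '#7f007f', '#0000ff']: the midpoint is an even blend
print(grad['opaque'])  # opacity stepping from 1 down to 0, one value per color
```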
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_color_gradient():\n colors = []\n step = 10\n for red, green in zip(range(255,-step, -step), range(0, 255, step)):\n colors.append({'red': red, 'green': green, 'blue': 0})\n for green, blue in zip(range(255,-step, -step), range(0, 255, step)):\n colors.append({'red': 0, 'green': green, 'blue': blue})\n for blue, red in zip(range(255,-step, -step), range(0, 255, step)):\n colors.append({'red': red, 'green': 0, 'blue': blue})\n return colors", "def gradient(text, start_color, end_color):\n length = len(text)\n\n # the delta of each color\n delta_r = start_color.r - end_color.r\n delta_g = start_color.g - end_color.g\n delta_b = start_color.b - end_color.b\n\n # the \"step\" sizes for each of the colors\n step_r = delta_r / length\n step_g = delta_g / length\n step_b = delta_b / length\n \n result = \"\"\n current_color = start_color\n\n for character in text:\n result += \"%s%s\" % (current_color, character)\n current_color -= Color(step_r, step_g, step_b)\n\n return \"%s%s\" % (result, RESET)", "def linearGradient(self,start_rgb, finish_rgb, n):\n rgb_list = [start_rgb]\n\n for t in range(1, n):\n current_color = [int((start_rgb[j]) + (float(t)/(n-1))*(finish_rgb[j] - start_rgb[j])) for j in range(3)]\n rgb_list.append(current_color)\n\n return rgb_list", "def get_colors(n, cmap=\"viridis\", start=0.0, stop=1.0, alpha=1.0, return_hex=False):\n colors = [cm.get_cmap(cmap)(x) for x in np.linspace(start, stop, n)]\n colors = [(r, g, b, alpha) for r, g, b, _ in colors]\n if return_hex:\n colors = _rgb_color_list_to_hex(colors)\n return colors", "def _generate_colors(\n self, x: NDArrayFloat\n ) -> Sequence[RGBHexColor | None]:\n x = np.asarray(x)\n idx = np.round((x * 255) + ROUNDING_JITTER).astype(int)\n arr = np.column_stack(\n [self._r_lookup[idx], self._g_lookup[idx], self._b_lookup[idx]]\n )\n return [rgb_to_hex(c) for c in arr]", "def n_colors(lowcolor, highcolor, n_colors):\n diff_0 = float(highcolor[0] - lowcolor[0])\n incr_0 = diff_0/(n_colors - 1)\n diff_1 = float(highcolor[1] - lowcolor[1])\n incr_1 = diff_1/(n_colors - 1)\n diff_2 = float(highcolor[2] - lowcolor[2])\n incr_2 = diff_2/(n_colors - 1)\n color_tuples = []\n\n for index in range(n_colors):\n new_tuple = (lowcolor[0] + (index * incr_0),\n lowcolor[1] + (index * incr_1),\n lowcolor[2] + (index * incr_2))\n color_tuples.append(new_tuple)\n\n return color_tuples", "def split_colors(self, color_count, color_from, color_to):\n colors = []\n for c in range(3):#RGB\n step = np.abs(color_from[c] - color_to[c])/color_count\n if step:\n if color_from[c]>color_to[c]:\n color = np.arange(color_from[c],color_to[c],-step)\n else:\n color = np.arange(color_from[c],color_to[c],step)\n else:\n color = [color_from[c] for i in np.arange(color_count)]\n\n\n colors.append(color)\n colors = [(a,b,c) for a,b,c in zip(colors[0],colors[1],colors[2])]\n return colors", "def _color_brew(n):\n color_list = []\n\n # Initialize saturation & value; calculate chroma & value shift\n s, v = 0.75, 0.9\n c = s * v\n m = v - c\n\n for h in np.arange(25, 385, 360. 
/ n).astype(int):\n # Calculate some intermediate values\n h_bar = h / 60.\n x = c * (1 - abs((h_bar % 2) - 1))\n # Initialize RGB with same hue & chroma as our color\n rgb = [(c, x, 0),\n (x, c, 0),\n (0, c, x),\n (0, x, c),\n (x, 0, c),\n (c, 0, x),\n (c, x, 0)]\n r, g, b = rgb[int(h_bar)]\n # Shift the initial RGB values to match value and store\n rgb = [(int(255 * (r + m))),\n (int(255 * (g + m))),\n (int(255 * (b + m)))]\n color_list.append(rgb)\n\n return color_list", "def _to_color(indx, base):\n base2 = base * base\n b = 2 - indx / base2\n r = 2 - (indx % base2) / base\n g = 2 - (indx % base2) % base\n return b * 127, r * 127, g * 127", "def extract_colors(self, palette, colors):\n return [palette[i:i + 3] for i in range(0, colors * 3, 3)]", "def gradient_cmap(gcolors, nsteps=256, bounds=None):\n from matplotlib.colors import LinearSegmentedColormap\n\n ncolors = len(gcolors)\n if bounds is None:\n bounds = np.linspace(0, 1, ncolors)\n\n reds = []\n greens = []\n blues = []\n alphas = []\n for b, c in zip(bounds, gcolors):\n reds.append((b, c[0], c[0]))\n greens.append((b, c[1], c[1]))\n blues.append((b, c[2], c[2]))\n alphas.append((b, c[3], c[3]) if len(c) == 4 else (b, 1.0, 1.0))\n\n cdict = {\n \"red\": tuple(reds),\n \"green\": tuple(greens),\n \"blue\": tuple(blues),\n \"alpha\": tuple(alphas),\n }\n\n cmap = LinearSegmentedColormap(\"grad_colormap\", cdict, nsteps)\n return cmap", "def condense_hex_colors(css):\n log.debug(\"Condensing all hexadecimal color values.\")\n regex = re.compile(\n r\"\"\"([^\\\"'=\\s])(\\s*)#([0-9a-f])([0-9a-f])([0-9a-f])\"\"\"\n + r\"([0-9a-f])([0-9a-f])([0-9a-f])\", re.I | re.S)\n match = regex.search(css)\n while match:\n first = match.group(3) + match.group(5) + match.group(7)\n second = match.group(4) + match.group(6) + match.group(8)\n if first.lower() == second.lower():\n css = css.replace(\n match.group(), match.group(1) + match.group(2) + '#' + first)\n match = regex.search(css, match.end() - 3)\n else:\n match = regex.search(css, match.end())\n return css", "def color_hex(self):\n n = 2\n return tuple(\n hex(int(self.color[i : i + n], 16)) for i in range(0, len(self.color), n)\n )", "def get_colors(n_curves):\n # Use offsets to avoid shitty crappy min/max colors (ugly yellow or white for example)\n first_step = 1. 
/ (n_curves + 1.)\n last_step = 1.\n nb_steps = n_curves + 1\n color_range = np.linspace(first_step, last_step, nb_steps)\n color = iter(plt.cm.hot(color_range))\n return color", "def unique_colors_rgb(n):\r\n hues = []\r\n # i is in the range 0, 1, ..., n - 1\r\n for i in range(1, n + 1):\r\n hues.append(360.0 / i)\r\n\r\n hs = []\r\n for hue in hues:\r\n h = math.floor(hue / 60) % 6\r\n hs.append(h)\r\n\r\n fs = []\r\n for hue in hues:\r\n f = hue / 60 - math.floor(hue / 60)\r\n fs.append(f)\r\n\r\n rgbcolors = []\r\n for h, f in zip(hs, fs):\r\n v = 1\r\n p = 0\r\n q = 1 - f\r\n t = f\r\n if h == 0:\r\n color = v, t, p\r\n elif h == 1:\r\n color = q, v, p\r\n elif h == 2:\r\n color = p, v, t\r\n elif h == 3:\r\n color = p, q, v\r\n elif h == 4:\r\n color = t, p, v\r\n elif h == 5:\r\n color = v, p, q\r\n rgbcolors.append(color)\r\n\r\n return rgbcolors", "def split_colours(colours_list, colours_required):\n colour_range = spectra.range(colours_list, colours_required)\n return [colour.hexcode for colour in colour_range]", "def make_color_dict(start_name, start_hsv, end_name, end_hsv, n):\r\n colors = linear_gradient(start_hsv, end_hsv, n)\r\n names = ['%sto%s%s_%s' % (start_name, end_name, n, i) for i in range(n)]\r\n return dict(zip(names, colors))", "def getPseudoColor( nValue, nMin = 0, nMax = 255, nErrorValue = -1 ):\n r, g, b = [0,0,0];\n # Invalid Data -> set Color to White\n if( nValue == nErrorValue ):\n r = 0xff;\n g = 0xff;\n b = 0xff;\n # Invalid Range\n # -> Set Color to Grey (debug)\n elif( nValue < nMin or nValue > nMax ):\n r = 0x80;\n g = 0x80;\n b = 0x80;\n else:\n # each part as a specific color computation\n # 5 parts:\n \"\"\"\n nSizePhase = ( nMax - nMin ) / 5;\n if( nValue < nSizePhase * 1 ):\n r = 0xff;\n g = (((nValue-nMin)-0*nSizePhase)*0xff)/nSizePhase;\n b = 0x0;\n elif( nValue < nSizePhase * 2 ):\n r = 0xff - (((nValue-nMin)-1*nSizePhase)*0xff)/nSizePhase;\n g = 0xff;\n b = 0;\n elif( nValue < nSizePhase * 3 ):\n r = 0;\n g = 0xff;\n b = (((nValue-nMin)-2*nSizePhase)*0xff)/nSizePhase;\n elif( nValue < nSizePhase * 4 ):\n r = 0;\n g = 0xff - (((nValue-nMin)-3*nSizePhase)*0xff)/nSizePhase;\n b = 0xff;\n else:\n r = 0xff;\n g = 0;\n b = 0xff - (((nValue-nMin)-4*nSizePhase)*0xff)/nSizePhase;\n \"\"\"\n # 2 parts: (green to yellow then yellow to red)\n nSizePhase = ( nMax - nMin ) / 2;\n if( nValue < nSizePhase * 1 ):\n r = (((nValue-nMin)-0*nSizePhase)*0xff)/nSizePhase;\n g = 0xff;\n b = 0x0;\n elif( nValue < nSizePhase * 2 ):\n r = 0xff;\n g = 0xff - (((nValue-nMin)-1*nSizePhase)*0xff)/nSizePhase;\n b = 0; \n # else - end\n return [r,g,b];", "def fade_color(c1, c2, n):\n assert n >= 2\n\n # decompose RGB. 
ignore alpha if present.\n rgb1 = get_channels(c1)\n rgb2 = get_channels(c2)\n\n # find distances by chanel.\n step_by_channel = (rgb2 - rgb1) / (n - 1)\n\n # build steps.\n scale = [rgb1 + (i * step_by_channel) for i in range(n)]\n scale = [get_hexcode(c) for c in scale]\n\n assert scale[0] == c1\n assert scale[-1] == c2\n\n return scale", "def linear_gradient(value, start, end, colour_list=None):\n\n # Translate the end colour to RGB arrays if necessary.\n if isinstance(start, str):\n # Default (search the molmol list then the X11 list).\n if colour_list == None:\n try:\n start = molmol_colours(start)\n except:\n start = x11_colours(start)\n\n # Molmol colours.\n elif colour_list == 'molmol':\n start = molmol_colours(start)\n\n # X11 colours.\n elif colour_list == 'x11':\n start = x11_colours(start)\n\n # Translate the end colour to RGB arrays if necessary.\n if isinstance(end, str):\n # Default (search the molmol list then the X11 list).\n if colour_list == None:\n try:\n end = molmol_colours(end)\n except:\n end = x11_colours(end)\n\n # Molmol colours.\n elif colour_list == 'molmol':\n end = molmol_colours(end)\n\n # X11 colours.\n elif colour_list == 'x11':\n end = x11_colours(end)\n\n # Truncate the value to be between zero and one.\n if value < 0.0:\n value = 0.0\n elif value > 1.0:\n value = 1.0\n\n # The position on the linear gradient.\n return value * (end - start) + start", "def color_transition(start_color, end_color, steps=20):\n assert isinstance(start_color, Color), \"start_color must be a Color instance\"\n assert isinstance(end_color, Color), \"end_color must be a Color instance\"\n\n h1, s1, v1 = start_color.hsv\n h2, s2, v2 = end_color.hsv\n\n h_seq = hsv_transition(h1, h2, steps, wrap=True)\n s_seq = hsv_transition(s1, s2, steps)\n v_seq = hsv_transition(v1, v2, steps)\n\n for (h, s, v) in zip(h_seq, s_seq, v_seq):\n yield HSV(h % 1, s, v)", "def graphColorRange(min, minColor, max, maxColor, numSlices=96, id=[0]):\n l = (graphColorSlice(\"-INF\", min, minColor, id=id) +\n graphColorSlice(max, \"INF\", maxColor, id=id))\n prevAlpha = 0\n while prevAlpha < 1:\n alpha = prevAlpha + 1.0 / numSlices\n l += graphColorSlice(prevAlpha*(max-min)+min,\n alpha*(max-min)+min,\n minColor.hsvBlend(maxColor, (alpha + prevAlpha) * 0.5),\n id=id)\n prevAlpha = alpha\n return l", "def monochrome_palette(basecolor, nstops, s_range=(1, 0.3), v_range=(1, 1.3), absolute=False):\n def clip(val, varname):\n if val < 0:\n val = 0\n logger.warning(\"[monochrome_palette]: \" + varname +\n \" was smaller than 0 and was clipped.\")\n elif val > 1:\n val = 1\n logger.warning(\"[monochrome_palette]: \" + varname +\n \" was greater than 1 and was clipped.\")\n return val\n\n if isinstance(basecolor, tuple):\n if any( v>1 for v in basecolor ):\n raise ValueError(\"If you are defining the basecolor by an \"\n \"RGB tuple, the values must be between 0 and 1. 
\"\n \"Specified basecolor: {}.\".format(str(basecolor)))\n basergb = mpl.colors.to_rgb(basecolor)\n h, s, v = mpl.colors.rgb_to_hsv(basergb)\n if absolute:\n s = 1; v = 1\n s_range = (clip(s_range[0] * s, 'saturation'), clip(s_range[1] * s, 'saturation'))\n v_range = (clip(v_range[0] * v, 'value'), clip(v_range[1] * v, 'value'))\n\n slist = [a*s for a in np.linspace(s_range[0], s_range[1], nstops)]\n vlist = [a*v for a in np.linspace(v_range[0], v_range[1], nstops)]\n clist = [mpl.colors.to_hex(mpl.colors.hsv_to_rgb((h, s_el, v_el)))\n for s_el, v_el in zip(slist, vlist)]\n return clist", "def get_colour(progress, colours):\n if progress >= 0 and progress <= 1:\n start_colour, end_colour = colours[0], colours[1]\n\n r = start_colour[0] + (end_colour[0] - start_colour[0]) * progress\n b = start_colour[1] + (end_colour[1] - start_colour[1]) * progress\n g = start_colour[2] + (end_colour[2] - start_colour[2]) * progress\n\n return '#%02x%02x%02x' % (round(r), round(b), round(g))\n \n else: return '#000000'", "def rgb_gradient(colours, fractions, n=255, cmap=None):\n cols = np.array(colours)\n fracs = np.array(fractions)\n if cols.shape[0] != fracs.size:\n raise ValueError('colours.shape[0] != fractions.size')\n colors = []\n for i in range(n):\n frac = float(i)/float(n-1)\n if frac <= fracs[0]: # before first fraction\n colors += [tuple(cols[0, :])]\n elif frac >= fracs[-1]: # after last fraction\n colors += [tuple(cols[-1, :])]\n else:\n ii = np.where(fracs >= frac)[0][0]\n if np.abs(fracs[ii]-frac) > const.eps: # exactly a fraction\n colors += [rgb_blend(cols[ii-1, :], cols[ii, :], frac)]\n else:\n colors += [tuple(cols[ii, :])]\n\n if cmap is not None:\n import matplotlib.colors as col\n import matplotlib.cm as cm\n iscmap = col.ListedColormap(colors, name=cmap, N=n)\n cm.register_cmap(name=cmap, cmap=iscmap)\n\n return colors", "def nthColor(i):\n if i < len(colors):\n return colors[i]\n\n c1 = colors[i % len(colors)]\n c2 = nthColor(i // len(colors))\n\n return \"#\" + hex((int(c1[1:],16) + int(c2[1:],16)) // 2)[2:]", "def fade(startColor, endColor, steps, interval, strip):\r\n lastUpdate = utime.time() - interval\r\n for i in range(0, steps):\r\n print(\"range step: \", steps)\r\n red = ((startColor[0] * (steps - i)) + (endColor[0] * i)) // steps\r\n green = ((startColor[1] * (steps - i)) + (endColor[1] * i)) // steps\r\n blue = ((startColor[2] * (steps - i)) + (endColor[2] * i)) // steps\r\n \r\n while ((utime.time() - lastUpdate) < interval):\r\n pass\r\n setStrip(strip, (red, green, blue))\r\n lastUpdate = utime.time()", "def blend_palette(colors, n_colors=6, as_cmap=False, input=\"rgb\"):\n colors = [_color_to_rgb(color, input) for color in colors]\n name = \"blend\"\n pal = mpl.colors.LinearSegmentedColormap.from_list(name, colors)\n if not as_cmap:\n pal = _ColorPalette(pal(np.linspace(0, 1, n_colors)))\n return pal", "def Gradient( x=np.linspace(0,1,2), min=None, max=None ):\n if min is None:\n min = x.min()\n if max is None:\n max = x.max()-min\n else:\n max -= min\n x_ = x - min\n x_ /= max\n return cmap_gradient( x_ )", "def calculate_colors(v):\n\n # Define constant color values\n lightness = [0.35, 0.5, 0.65]\n saturation = [0.35, 0.5, 0.65]\n\n # Calculate the CRC-32 checksum of colors encoded as a UTF-8 string\n hash = crc32(str(v).encode('utf-8')) & 0xffffffff\n\n # Calculate the HSL (hue, saturation, lightness) values for the vertices\n hue = ((hash % 359) / 1000) * 360\n hash //= 360\n sat = saturation[hash % len(saturation)]\n hash //= len(saturation)\n lig = 
lightness[hash % len(lightness)]\n\n return (hue, sat, lig)" ]
[ "0.7464628", "0.730974", "0.6747355", "0.64438266", "0.6420821", "0.6330026", "0.6233631", "0.61494654", "0.614863", "0.60044503", "0.5992193", "0.5977377", "0.59677243", "0.5953647", "0.5910034", "0.590775", "0.59075046", "0.58731186", "0.58708364", "0.586321", "0.5853994", "0.58416027", "0.5839125", "0.5770986", "0.5761376", "0.5738426", "0.5732354", "0.5728288", "0.5712357", "0.5700607" ]
0.7984741
0
Color to dictionary. Takes in a list of RGB sublists and returns a dictionary of colors in RGB and hex form, for use in a graphing function defined later on.
def _color_dict(gradient):\n    hex_colors = [_rgb2hex(RGB) for RGB in gradient]\n    rgb_colors = np.c_[[RGB[0] for RGB in gradient], [RGB[1] for RGB in gradient], [RGB[2] for RGB in gradient]]\n    return {'hex': hex_colors, 'rgb': rgb_colors}
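A minimal usage sketch for the positive document above. _rgb2hex is an internal helper the snippet calls but does not define; the stub below is an assumption about its behaviour (render one RGB triple as '#RRGGBB'), not the library's actual implementation.

import numpy as np

def _rgb2hex(RGB):
    # Assumed stub: clamp each channel to 0..255 and format as a hex string.
    return '#' + ''.join('{:02x}'.format(int(max(0, min(255, c)))) for c in RGB)

gradient = [[255, 0, 0], [128, 64, 0], [0, 128, 255]]
colors = _color_dict(gradient)
print(colors['hex'])        # ['#ff0000', '#804000', '#0080ff']
print(colors['rgb'].shape)  # (3, 3) -- np.c_ stacks the three channel columns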
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assigning_colors():\n rgb_colors = {}\n for name, hex in matplotlib.colors.cnames.items():\n color = []\n # So the values are from 0-255 and not 0-1\n for i in matplotlib.colors.to_rgb(hex):\n color.append(int(i * 255))\n\n color = tuple(color)\n rgb_colors[name] = color\n\n return rgb_colors", "def get_colors(self):\n colors = [\"#244486\", \"#A6A6A6\", \"#B12122\"]\n cmap = LinearSegmentedColormap.from_list(\"mycmap\", colors)\n\n color_palette=[cmap(i) for i in np.linspace(0, 1, len(set(self.nodes_list)))]\n return dict(zip(list(set(self.nodes_list)), color_palette))", "def getColorDict():\n scribus.statusMessage(\"Reading existing colors...\")\n colornames = scribus.getColorNames()\n scribus.progressTotal(len(colornames))\n i=0\n colordict={}\n for name in colornames:\n colordict[name]=None\n i=i+1\n scribus.progressSet(i)\n return colordict #we can ask this dict if the color already exists", "def create_color_dict() -> Dict[Tuple[str, int, int], Tuple[int, int, int]]:\n file = 'real_sRGB.csv'\n dictionary = dict()\n hues = set()\n with open(file, 'r') as f:\n lines = f.read().splitlines()\n for i in range(1, len(lines)):\n _, hue, value, chroma, *_, r, g, b = lines[i].split(',')\n value = int(value)\n chroma = int(chroma)\n r = int(r)\n g = int(g)\n b = int(b)\n dictionary[(hue, value, chroma)] = (r, g, b)\n hues.add(hue)\n\n # Add grayscale values for each hue.\n chroma = 0\n for hue in hues:\n for value in range(11):\n dictionary[(hue, value, chroma)] = gray_rgb_values[value]\n\n # Interpolate to create odd chroma values.\n for (hue, value, chroma) in tuple(dictionary.keys()):\n low_rgb = dictionary[(hue, value, chroma)]\n high_rgb = dictionary.get((hue, value, chroma + 2))\n if high_rgb is not None:\n dictionary[(hue, value, chroma + 1)] = average(low_rgb, high_rgb)\n return dictionary", "def colors(self):\n unique, counts = np.unique(self.arr, return_counts=True)\n return {k: v for (k, v) in zip(unique, counts)}", "def get_color_map_in_hex(rgb_colors):\n list_of_hex_colors = []\n # Iterating through the list of colors given\n for i in range(len(rgb_colors)):\n rgb = []\n # Iterating through each rgb to get them into a range of 0-255\n for j in range(3):\n num = int(rgb_colors[i][j] * 255)\n rgb.append(num)\n # Converting the rgb to hex and appending them to a new list\n list_of_hex_colors.append(rgb_to_hex(rgb))\n return list_of_hex_colors", "def make_color_dict(start_name, start_hsv, end_name, end_hsv, n):\r\n colors = linear_gradient(start_hsv, end_hsv, n)\r\n names = ['%sto%s%s_%s' % (start_name, end_name, n, i) for i in range(n)]\r\n return dict(zip(names, colors))", "def get_colors():\n colors = {}\n for h in wn.synset('chromatic_color.n.01').hyponyms():\n colors[h.lemmas()[0].name()] = [l.name() for l in h.lemmas()]\n colors[h.lemmas()[0].name()].extend(all_hyponyms(h)) \n for h in wn.synset('achromatic_color.n.01').hyponyms():\n colors[h.lemmas()[0].name()] = [l.name() for l in h.lemmas()]\n colors[h.lemmas()[0].name()].extend(all_hyponyms(h)) \n return colors", "def colors(self) -> dict:\n raise NotImplementedError", "def get_color_in_rgb_decimal():\n\n # Grabbing custom colormap from matplotlib\n a = cm.get_cmap('cool', 32)\n b = cm.get_cmap('spring', 32)\n c = cm.get_cmap('autumn_r', 64)\n d = cm.get_cmap('bwr_r', 192)\n e = cm.get_cmap('Greens', 192)\n\n # Adding the colormaps into one stack to have a more comprehensive color spectrum \n newcolors = np.vstack((a(np.linspace(0, 1, 32)), \n b(np.linspace(0, 1, 32)), \n c(np.linspace(0, 1, 64)),\n d(np.linspace(0, 
0.5, 192)),\n e(np.linspace(0, 1, 192)),\n ))\n return newcolors", "def assign_colors(data: List[EmissionPerCapita]) -> dict:\r\n colors = {}\r\n for emission in data:\r\n r = random.randint(1, 255)\r\n g = random.randint(1, 255)\r\n b = random.randint(1, 255)\r\n color = \"rgb(\" + str(r) + \",\" + str(g) + \",\" + str(b) + \")\"\r\n dict.update(colors, {emission.name: color})\r\n\r\n return colors", "def __init__(self, ordered_list):\n\n self.colors = dict((i, color) for (i, color) in enumerate(ordered_list))", "def _create_color_lot(color_names, color_subnames, color_dict_rgb):\n lot = {}\n i = 0\n for sn in np.arange(len(color_subnames)):\n for n in np.arange(len(color_names)):\n lot[i] = color_dict_rgb[color_names[n]][color_subnames[sn]]\n i += 1\n\n return lot", "def create_colors_list(color_dict):\r\n ret = []\r\n for i in range(len(color_dict)):\r\n ret.append('#' + color_dict[i]['@rgb'])\r\n return ret", "def compile_palette(palette_list):\n global _COMPILED_PALETTE\n _COMPILED_PALETTE = {}\n\n for color in palette_list:\n r_sum = math.fabs(color[0] ** _MAGNITUDE)\n g_sum = math.fabs(color[1] ** _MAGNITUDE)\n b_sum = math.fabs(color[2] ** _MAGNITUDE)\n\n _COMPILED_PALETTE[color] = [r_sum, g_sum, b_sum]", "def _create_color_map(self):\n unique_labels = np.unique(self.out_labels)\n color_map = {}\n for unique_label in unique_labels:\n color_map[unique_label] = self._random_color()\n\n return color_map", "def getColorMap(colors):\n # Normalise RGBs\n norm_colors = []\n for color in colors:\n norm_colors.append([val / 255. for val in color])\n # create color map\n cmap = cols.ListedColormap(norm_colors)\n\n return cmap", "def get_colors(color_list):\n rgba_colors = []\n a = [0.5,0.5,0.6,0.4,0.3,0.2]\n i = 0\n for c in color_list:\n rgba_colors.append(list(colors.to_rgba(c)))\n rgba_colors[i][3] = a[i]\n i+=1\n\n return rgba_colors", "def colors(self):\n\t\treturn [(0, 30, 255),(0, 30, 120)]", "def ordered_colors():\n\n return [(\"yellow\",0.263) ,(\"orange\", 0.047), (\"red\",0.0),(\"green\", 0.444), (\"purple\", 0.972)]", "def _string_to_colors(self):\n string = self.str_colors\n colors_three = [string[c:c+3] for c in range(0, len(string), 3)]\n colors_three = [list(color) for color in colors_three]\n pixels = [[ord(rgb) for rgb in color] for color in colors_three]\n return pixels", "def get_color_words():\n color_word_dict = {}\n color_data = csv.reader(open('./color_names.csv'), delimiter=\",\", quotechar='\"')\n\n for row in color_data:\n if row[0] != \"Colour Name\":\n name = row[0].lower()\n family = row[2].lower()\n hex_value = row[3].lower()\n color_word_dict[name] = (hex_value, family)\n return color_word_dict", "def get_colour_map(self):\n try:\n return {'C# minor' : 'Grey', 'A major' : 'Red', 'D minor' : 'Green',\n 'Eb Purple': 'greenyellow', 'D major' : 'Pink', 'G major' : 'Orange',\n 'G minor': 'goldenrod', 'A minor' : 'indianred', 'C minor' : 'peachpuff',\n 'B minor' : 'deepskyblue', 'Ab Major' : 'firebrick', 'Eb / D# minor' : 'orchid',\n 'Ab major' : 'moccasin', 'G# minor' : 'slateblue', 'Eb major' : 'turquoise',\n 'C major' : 'tomato', 'B major' : 'darkmagenta', 'F major' : 'olivedrab',\n 'F minor' : 'olive', 'Bb major' : 'lightsteelblue', 'Db major' : 'plum',\n 'Bb minor' : 'mediumspringgreen', 'E minor' : 'lightsalmon',\n 'F# / Gb major' : 'gold', 'F# minor' : 'burlywood'}\n\n # If colour not found to match, return grey as a last resort\n except KeyError as e:\n print('Unmatched colour: {0}'.format(e))\n return 'Grey'", "def colorPaletteToRGB(image_data,color_table): 
\n color_table_array = numpy.array([ord(c) for c in color_table])\n n_colors = color_table_array.size / 3\n color_table_array = color_table_array.reshape((n_colors,3))\n channels = [color_table_array[image_data,i] for i in range(3)]\n return channels", "def separate_colors(self):\n colors = self.get_sorted_pixels()\n colors_dict = dict((val[1], Image.new('RGB', self.size, (255,255,255))) \n for val in colors)\n pixel_dict = dict((img, []) for img in colors_dict.keys())\n\n pix = self.image.load()\n for i in range(self.width):\n for j in range(self.height):\n if pix[i,j] in colors_dict:\n colors_dict[pix[i,j]].putpixel((i,j),(0,0,0))\n pixel_dict[pix[i,j]].append((i, j))\n\n return [(color, colors_dict[color], pixels) for color, pixels in pixel_dict.items()]", "def hex2rgb(colors):\n if 'str' in str(type(colors)):\n colors = np.array([colors])\n\n rgbcolors = list(map(lambda x: matplotlib.colors.to_rgb(x), colors))\n return np.array(rgbcolors)", "def get_segment_colour_map(self, features):\n\n hashList = {'1' : 'Grey',\n '2':'Red',\n '3':'Green',\n '4':'greenyellow',\n '5':'Pink',\n '6':'Orange',\n '7':'goldenrod',\n '8':'indianred',\n '9':'peachpuff',\n '10':'deepskyblue',\n '11':'firebrick',\n '12':'orchid',\n '13': 'moccasin',\n '14':'slateblue',\n '15':'turquoise',\n '16':'tomato',\n '17':'darkmagenta',\n '18':'olivedrab'}\n return hashList", "def _hex2rgb(c_hex):\n # Pass 16 to the integer function for change of base\n return [int(c_hex[i:i + 2], 16) for i in range(1, 6, 2)]", "def _rgb_color_list_to_hex(color_list):\n color_list_rgb = [[int(x * 255) for x in c[0:3]] for c in color_list]\n color_list_hex = [f\"#{rgb[0]:02X}{rgb[1]:02X}{rgb[2]:02X}\" for rgb in color_list_rgb]\n return color_list_hex", "def color_dict_to_objects(d, colorspace='hsv'):\r\n result = {}\r\n for k, v in d.items():\r\n result[k] = Color(k, v, colorspace)\r\n return result" ]
[ "0.72993577", "0.7166082", "0.71234846", "0.7105684", "0.67915034", "0.66504467", "0.6633686", "0.6622999", "0.656271", "0.65496135", "0.6542531", "0.64886117", "0.64846194", "0.63931656", "0.63931257", "0.6381189", "0.6377055", "0.6372067", "0.62352693", "0.62340975", "0.6232166", "0.6180067", "0.6158321", "0.6154185", "0.6118322", "0.6095956", "0.6082898", "0.6058045", "0.60556746", "0.6053452" ]
0.7698112
0
Set gradient on density color. This function determines the density of the data and adds a transparency column. If samples are in dense areas, transparency values are towards 1 (visible), whereas in non-dense areas, the transparency values are towards 0 (not visible).
def gradient_on_density_color(X, c_rgb, labels, opaque_type='per_class', showfig=False, verbose='info'):\n    # Set the logger\n    set_logger(verbose=verbose)\n    if labels is None: labels = np.repeat(0, X.shape[0])\n    from scipy.stats import gaussian_kde\n    uilabels = np.unique(labels)\n\n    # Add the transparency column if it does not exist\n    if c_rgb.shape[1]<=3:\n        c_rgb = np.c_[c_rgb, np.ones(c_rgb.shape[0])]\n    density_colors = np.ones_like(c_rgb)\n\n    if opaque_type=='all':\n        try:\n            # Compute density\n            z = gaussian_kde(X.T)(X.T)\n            weights = _normalize(z[np.argsort(z)[::-1]])\n            c_rgb[:, 3] = weights\n        except:\n            pass\n\n    if (len(uilabels)!=len(labels)):\n        for label in uilabels:\n            idx = np.where(labels==label)[0]\n            if X.shape[1]==2:\n                xy = np.vstack([X[idx, 0], X[idx, 1]])\n            else:\n                xy = np.vstack([X[idx, 0], X[idx, 1], X[idx, 2]])\n            try:\n                # Compute density\n                z = gaussian_kde(xy)(xy)\n                # Sort on density\n                didx = idx[np.argsort(z)[::-1]]\n                passed = True\n                # weights = _normalize(z[np.argsort(z)[::-1]])\n            except:\n                didx=idx\n                passed = False\n\n            # Order colors correctly based on density\n            density_colors[didx] = c_rgb[idx, :]\n            # Update the transparency level based on the density weights.\n            if opaque_type=='per_class':\n                weights = _normalize(z[np.argsort(z)[::-1]]) if passed else np.ones_like(idx)\n                density_colors[didx, 3] = weights\n\n            if showfig:\n                plt.figure()\n                fig, ax = plt.subplots(1, 2, figsize=(20, 10))\n                ax[0].scatter(X[didx, 0], X[didx, 1], color=c_rgb[idx, 0:3], alpha=c_rgb[idx, 3], edgecolor='#000000')\n                ax[1].scatter(idx, idx, color=c_rgb[idx, 0:3], alpha=c_rgb[idx, 3], edgecolor='#000000')\n\n        c_rgb=density_colors\n\n    # Return\n    return c_rgb
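A hedged usage sketch for the density-colouring function above. set_logger and _normalize are internal helpers of the snippet's library, so this assumes the function is imported from that package rather than pasted standalone; the data below is synthetic.

import numpy as np

rng = np.random.default_rng(0)
# Two Gaussian blobs: dense cores should come out opaque, sparse fringes faint.
X = np.vstack([rng.normal(0, 1, (200, 2)), rng.normal(5, 1, (200, 2))])
labels = np.repeat([0, 1], 200)
c_rgb = np.tile([0.1, 0.4, 0.8], (400, 1))            # one base colour, no alpha column yet

c_rgba = gradient_on_density_color(X, c_rgb, labels)  # appends a density-driven alpha column
print(c_rgba.shape)                                   # (400, 4)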
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isothermal_depth_wyrtki1964_gradient(da_PT):\n\n # make land mask based on surface layer\n da_mask = da_PT.isel(z=0)*0.+1.\n\n # calculate drho/dz\n da_PT_dz = da_PT.differentiate('z') # kg/m^4\n\n # interpolate to finer vertical resolution (2.5m)\n da_interp = da_PT_dz.interp(z=np.arange(0,da_PT_dz.z.max(),2.5))\n\n # remove values shallower than critcal value\n da_interp_masked = da_interp.where(da_interp>0.02,other=99999)\n\n # find first index that have value larger than critical value\n z_ind = da_interp_masked.argmin(dim='z',skipna=True)\n\n # used 2d index to find 2d depth map\n da_itd = da_interp.z[z_ind]*da_mask\n\n return da_itd", "def isothermal_depth_wyrtki1964_gradient(da_PT):\n\n # make land mask based on surface layer\n da_mask = da_PT.isel(z=0)*0.+1.\n\n # calculate drho/dz\n da_PT_dz = da_PT.differentiate('z') # kg/m^4\n\n # interpolate to finer vertical resolution (2.5m)\n da_interp = da_PT_dz.interp(z=np.arange(0,da_PT_dz.z.max(),2.5))\n\n # remove values shallower than critcal value\n da_interp_masked = da_interp.where(da_interp>0.02,other=99999)\n\n # find first index that have value larger than critical value\n z_ind = da_interp_masked.argmin(dim='z',skipna=True)\n\n # used 2d index to find 2d depth map\n da_itd = da_interp.z[z_ind]*da_mask\n\n return da_itd", "def get_density_cmap():\n # Add completely white color to Reds colormap in Matplotlib\n list_colors = plt.cm.datad['Reds']\n list_colors = list(list_colors)\n list_colors.insert(0, (1, 1, 1))\n list_colors.insert(0, (1, 1, 1))\n lscm = matplotlib.colors.LinearSegmentedColormap.from_list(\"my_Reds\", list_colors)\n return lscm", "def gradient_fill(x, y, fill_color=None, palpha=1, ax=None, **kwargs):\n if ax is None:\n ax = plt.gca()\n\n line, = ax.plot(x, y,color=fill_color, **kwargs)\n if fill_color is None:\n fill_color = line.get_color()\n\n zorder = line.get_zorder()\n alpha = line.get_alpha()\n alpha = 1.0 if alpha is None else alpha\n\n z = np.empty((100, 1, 4), dtype=float)\n rgb = mcolors.colorConverter.to_rgb(fill_color)\n z[:,:,:3] = rgb\n z[:,:,-1] = np.linspace(0, palpha, 100)[:,None]\n\n xmin, xmax, ymin, ymax = x.min(), x.max(), y.min(), y.max()\n im = ax.imshow(z, aspect='auto', extent=[xmin, xmax, ymin, ymax],\n origin='lower', zorder=zorder)\n\n xy = np.column_stack([x, y])\n xy = np.vstack([[xmin, ymin], xy, [xmax, ymin], [xmin, ymin]])\n clip_path = mpatches.Polygon(xy, facecolor='none', edgecolor='none', closed=True)\n ax.add_patch(clip_path)\n im.set_clip_path(clip_path)\n \n patche = mpatches.Patch(color=fill_color,alpha=palpha)\n\n ax.autoscale(True)\n return line, patche", "def Gradient( x=np.linspace(0,1,2), min=None, max=None ):\n if min is None:\n min = x.min()\n if max is None:\n max = x.max()-min\n else:\n max -= min\n x_ = x - min\n x_ /= max\n return cmap_gradient( x_ )", "def densitychange(self,dt=0.1):\n #Using conservation of mass and diffusion\n dp_dt = -div(self.u*self.d)\n dp_dt += ndimage.laplace(self.d)\n #This term seems to make the density clump together, producing \n #waves which can make the simulation blow up.\n #dp_dt -= np.add.reduce(self.u*np.array(np.gradient(self.d)))\n #Edge density shouldn't change.\n dp_dt[[0,-1]] = dp_dt[:,[0,-1]] = 0\n self.d += dp_dt*dt\n #Change pressure accordingly to ideal gas law\n #AAAAAAAAAAAAAAAA this fixed most of the poblems from before!!!\n self.P = self.d*8.214*273\n #Conserve mass by spreading out fluctuations \n self.d[1:-1,1:-1] += (self.mass-np.sum(self.d))/self.vol", "def gradient(self, type, colors, alphas, ratios, 
matrix):\n self.call('gradient', type,\n l[[a(v=x) for x in colors]],\n l[[a(v=x) for x in alphas]],\n l[[a(v=x) for x in ratios]],\n d[[i(k=k, v=v) for (k, v) in list(matrix.items())]])", "def set_print_density(self, density=10, break_time=2):\n self.write(self.ASCII_DC2, '#', (density << 5) | break_time)", "def set_density(self, theta, scale):\n self.density = Normal(self.mean(theta), scale)", "def gradient_fill(x, y, fill_color=None, ax=None, direction=1, **kwargs):\n\n line, = ax.plot(x, y, **kwargs)\n if fill_color is None:\n fill_color = line.get_color()\n\n # print fill_color\n zorder = line.get_zorder()\n alpha = line.get_alpha()\n alpha = 1.0 if alpha is None else alpha\n\n z = np.empty((100, 1, 4), dtype=float)\n rgb = mcolors.colorConverter.to_rgb(fill_color)\n z[:, :, :3] = rgb\n if direction == 1:\n z[:, :, -1] = np.linspace(0, alpha, 100)[:, None]\n else:\n z[:, :, -1] = np.linspace(alpha, 0, 100)[:, None]\n\n xmin, xmax, ymin, ymax = x.min(), x.max(), y.min(), y.max()\n im = ax.imshow(z, aspect='auto', extent=[xmin, xmax, ymin, ymax],\n origin='lower', zorder=zorder)\n\n xy = np.column_stack([x, y])\n if direction == 1:\n xy = np.vstack([[xmin, ymin], xy, [xmax, ymin], [xmin, ymin]])\n else:\n xy = np.vstack([[xmin, ymax], xy, [xmax, ymax], [xmin, ymax]])\n clip_path = Polygon(xy, lw=0.0, facecolor='none',\n edgecolor='none', closed=True)\n ax.add_patch(clip_path)\n im.set_clip_path(clip_path)\n\n ax.autoscale(True)\n\n return line, im", "def test_density_colormap(self):\n cmap = matplotlib.cm.get_cmap('density')\n np.testing.assert_allclose(cmap(0.0), [0.214, 0.152, 0.535, 1], atol=0.001)\n np.testing.assert_allclose(cmap(1.0), [0.988, 0.978, 0.042, 1], atol=0.001)", "def amended_gradient(self, dw, trn_X, trn_y, epsilon: float = 0.01, amend: float = 0.1):\n norm = torch.cat([w.view(-1) for w in dw]).norm()\n eps = epsilon / norm\n\n dw_pos = self.finite_difference(dw, trn_X, trn_y, eps, wrt='weights')\n dw_neg = self.finite_difference(dw, trn_X, trn_y, -eps, wrt='weights')\n dalpha_pos = self.finite_difference([(wp - wn) / 2 for wp, wn in zip(dw_pos, dw_neg)], trn_X, trn_y, 1, wrt='alpha')\n dalpha_neg = self.finite_difference([(wp - wn) / 2 for wp, wn in zip(dw_pos, dw_neg)], trn_X, trn_y, -1, wrt='alpha')\n hessian = [-amend * (p - n) / (2. 
* eps) for p, n in zip(dalpha_pos, dalpha_neg)]\n return hessian", "def gradualShadeH(img, brightness, direction=0, min_b=0.35, max_b=1.5):\n _, w, _ = img.shape\n img2 = np.float32(img)\n\n min_b = 0.35\n max_b = 1.5\n\n half = brightness / 2.0\n alpha = max(min_b, 1 - half)\n beta = min(max_b, alpha + brightness)\n delta = (beta - alpha) / float(w)\n\n t = alpha\n if direction % 2 == 0:\n t = beta\n delta = -1 * delta\n\n for i in range(w):\n t += delta\n img2[:, i, :] = img2[:, i, :] * t\n\n img2 = np.uint8(img2.clip(0, 255))\n return img2", "def set_ref_density(self, ref_density):\n self.ref_density = ref_density", "def setColorIndex(idx):\n dislin.setclr(idx)", "def setAxisBackground(idx=-1):\n dislin.axsbgd(idx)", "def late_gradient_fusion():\n pass", "def _UpdateGradient(self):\n self.mol.GetGradient('analytic')", "def _grad_pen(self, coeffs):\n l_elastic_net = self.l_elastic_net\n eta = self.eta\n n_features = self.n_features\n grad = np.zeros(2 * n_features)\n # Gradient of lasso penalization\n grad += l_elastic_net * (1 - eta)\n # Gradient of ridge penalization\n grad_pos = (l_elastic_net * eta)\n grad[:n_features] += grad_pos * coeffs\n grad[n_features:] -= grad_pos * coeffs\n return grad", "def gradualShadeV(img, brightness, direction=0, min_b=0.35, max_b=1.5):\n h, _, _ = img.shape\n img2 = np.float32(img)\n\n half = brightness / 2.0\n alpha = max(min_b, 1 - half)\n beta = min(max_b, alpha + brightness)\n delta = (beta - alpha) / float(h)\n\n t = alpha\n if direction % 2 == 0:\n t = beta\n delta = -1 * delta\n\n for j in range(h):\n t += delta\n img2[j, :, :] = img2[j, :, :] * t\n\n img2 = np.uint8(img2.clip(0, 255))\n return img2", "def compute_gradient_for_all(self):\r\n\r\n # YOUR CODE HERE\r\n self.compute_gradient_for_subset(0, self.DATAPOINTS)", "def _cal_color(xy, norm=False):\n color = gaussian_kde(xy)(xy)\n if norm:\n cmin = np.min(color)\n cmax = np.max(color)\n color = (color - cmin) / (cmax - cmin)\n\n return color", "def app_SN_animated_gradient_plot(self):\n print('this option is yet to be implemented')", "def early_gradient_fusion():\n pass", "def auto_density(color):\r\n\r\n blue_density = ansi_density(color, ANSI_STATUS_T_BLUE)\r\n green_density = ansi_density(color, ANSI_STATUS_T_GREEN)\r\n red_density = ansi_density(color, ANSI_STATUS_T_RED)\r\n \r\n densities = [blue_density, green_density, red_density]\r\n min_density = min(densities)\r\n max_density = max(densities)\r\n density_range = max_density - min_density\r\n \r\n # See comments in density_standards.py for VISUAL_DENSITY_THRESH to\r\n # understand what this is doing.\r\n if density_range <= VISUAL_DENSITY_THRESH:\r\n return ansi_density(color, ISO_VISUAL)\r\n elif blue_density > green_density and blue_density > red_density:\r\n return blue_density\r\n elif green_density > blue_density and green_density > red_density:\r\n return green_density\r\n else:\r\n return red_density", "def make_conditional_density(bgm_fit, threshold, sigma, width):\n pass", "def isothermal_depth_d20(da_PT):\n\n # interpolate to finer vertical resolution (2.5m)\n da_interp = da_PT.interp(z=np.arange(0,da_PT.z.max(),2.5))\n\n # make land mask based on surface layer\n da_mask = da_PT.isel(z=0)*0.+1.\n\n # remove values smaller than critcal value\n da_interp = da_interp.where(da_interp>20,other=99999)\n\n # find first index that have value larger than critical value\n z_ind = da_interp.argmin(dim='z',skipna=True)\n\n # used 2d index to find 2d depth map\n da_itd = da_interp.z[z_ind]*da_mask\n\n return da_itd", "def 
gradient(self, x):\n pass", "def add_alpha(image_data):\n\n # get hsv image\n hsv = rgb_to_hsv(image_data[:, :, :3].astype(float) / 255)\n\n # create new image and set alpha channel\n new_image_data = np.zeros(image_data.shape)\n new_image_data[:, :, 3] = hsv[:, :, 2]\n\n # set value of hsv image to either 0 or 1.\n hsv[:, :, 2] = np.where(hsv[:, :, 2] > 0, 1, 0)\n\n # combine alpha and new rgb\n new_image_data[:, :, :3] = hsv_to_rgb(hsv)\n return new_image_data", "def GetGradientOpacityArray(self, p_int):\n ..." ]
[ "0.57073355", "0.57073355", "0.56205153", "0.5285244", "0.52843755", "0.5108466", "0.5072132", "0.50284564", "0.49939755", "0.49933887", "0.49889076", "0.4987362", "0.4982678", "0.4981954", "0.49721134", "0.49690628", "0.49479908", "0.49206397", "0.4919995", "0.49167046", "0.4914512", "0.4898455", "0.489628", "0.4862366", "0.48577794", "0.48570338", "0.48431027", "0.48421273", "0.48335558", "0.48289496" ]
0.65256983
0
Convert old verbosity to the new one.
def convert_verbose_to_new(verbose):\n    # In case the old (integer) verbosity is used, convert it to the new (string) one.\n    if verbose is None: verbose=0\n    if not isinstance(verbose, str) and verbose<10:\n        status_map = {\n            'None': 'silent',\n            0: 'silent',\n            6: 'silent',\n            1: 'critical',\n            2: 'warning',\n            3: 'info',\n            4: 'debug',\n            5: 'debug'}\n        if verbose>=2: print('[colourmap] WARNING use the new verbose status. This will be deprecated in future versions.')\n        return status_map.get(verbose, 0)\n    else:\n        return verbose
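A few illustrative calls, traced directly from the shim's lookup table above:

convert_verbose_to_new(None)       # 'silent' (None is coerced to 0 first)
convert_verbose_to_new(1)          # 'critical'
convert_verbose_to_new(3)          # 'info' (levels >= 2 also print the deprecation warning)
convert_verbose_to_new('warning')  # 'warning' -- strings pass straight through
convert_verbose_to_new(20)         # 20 -- numeric levels >= 10 are returned unchanged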
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verbosity(v):\n assert v in [0,1,2] # debug, warn, info\n GLOBAL['VERBOSITY'] = v", "def test_increase_verbosity(self):\n # Start from a known state.\n set_level(logging.INFO)\n assert get_level() == logging.INFO\n # INFO -> VERBOSE.\n increase_verbosity()\n assert get_level() == logging.VERBOSE\n # VERBOSE -> DEBUG.\n increase_verbosity()\n assert get_level() == logging.DEBUG\n # DEBUG -> SPAM.\n increase_verbosity()\n assert get_level() == logging.SPAM\n # SPAM -> NOTSET.\n increase_verbosity()\n assert get_level() == logging.NOTSET\n # NOTSET -> NOTSET.\n increase_verbosity()\n assert get_level() == logging.NOTSET", "def set_verbosity(self, verbosity):\n if verbosity == 0:\n self.__logger.setLevel(logging.CRITICAL)\n if verbosity == 1:\n self.__logger.setLevel(logging.ERROR)\n if verbosity == 2:\n self.__logger.setLevel(logging.WARNING)\n if verbosity == 3:\n self.__logger.setLevel(logging.INFO)\n if verbosity >= 4:\n self.__logger.setLevel(logging.DEBUG)", "def test_decrease_verbosity(self):\n # Start from a known state.\n set_level(logging.INFO)\n assert get_level() == logging.INFO\n # INFO -> NOTICE.\n decrease_verbosity()\n assert get_level() == logging.NOTICE\n # NOTICE -> WARNING.\n decrease_verbosity()\n assert get_level() == logging.WARNING\n # WARNING -> SUCCESS.\n decrease_verbosity()\n assert get_level() == logging.SUCCESS\n # SUCCESS -> ERROR.\n decrease_verbosity()\n assert get_level() == logging.ERROR\n # ERROR -> CRITICAL.\n decrease_verbosity()\n assert get_level() == logging.CRITICAL\n # CRITICAL -> CRITICAL.\n decrease_verbosity()\n assert get_level() == logging.CRITICAL", "def set_verbosity(self,verbosity):\n type_name = type(verbosity).__name__\n if re.search('int',type_name) != None:\n \n # It is an integer, tes bounds\n if verbosity < 4 and verbosity > -1:\n self.verbosity = verbosity\n else:\n raise KINSOL_Exception(\"The variable sent to 'set_verbosity' must be either 0, 1, 2 or 3.\")\n else:\n raise KINSOL_Exception(\"The variable sent to 'set_verbosity' must be an integer.\")", "def turn_on_verbosity(self):\n self.m.setParam('OutputFlag', 1)", "def set_verbosity(self, value):\n for source in self._sources.itervalues():\n source.verbosity = value", "def set_verbose(verbosity: bool) -> None:\n global VERBOSE # pylint: disable=global-statement\n VERBOSE = verbosity", "def set_verbosity():\n\n\tif conf.verbose is None:\n\t\tconf.verbose = 1\n\n\tconf.verbose = int(conf.verbose)\n\n\tif conf.verbose == 0:\n\t\tlogger.setLevel(logging.ERROR)\n\telif conf.verbose == 1:\n\t\tlogger.setLevel(logging.INFO)\n\telif conf.verbose == 2:\n\t\tlogger.setLevel(logging.DEBUG)\n\telif conf.verbose == 3:\n\t\tlogger.setLevel(CUSTOM_LOGGING.PAYLOAD)\n\telif conf.verbose == 4:\n\t\tlogger.setLevel(CUSTOM_LOGGING.TRAFFIC_OUT)\n\telif conf.verbose >= 5:\n\t\tlogger.setLevel(CUSTOM_LOGGING.TRAFFIC_IN)", "def SetVerbose(new_verbose=True):\n global _verbose\n _verbose = new_verbose", "def verbose(value=None):\n global verbosity\n\n if value != None:\n verbosity = value\n \n try:\n rval = verbosity\n except NameError:\n verbosity = False\n rval = verbosity\n\n return rval", "def toggleVerbose(self):\n self.__VERBOSE = not self.__VERBOSE", "def _set_verbosity_level0(self):\n pass", "def setVerbose(newVal):\n global verbose\n verbose = newVal", "def verbose():\n return Verbose.level()", "def verbose(self, value):\n if value > self.DEBUG:\n value = self.DEBUG\n if value < self.NONE:\n value = self.NONE\n self._verbose = value", "def set_verbosity_level(self, verbosity_level: int) -> 
bool:\n _enum_values = [item.value for item in VerbosityLevel]\n if verbosity_level in _enum_values:\n # Set verbosity level\n self._verbosity_level = verbosity_level\n\n # Switcher\n _switch = {\n 0: self._set_verbosity_level0,\n 1: self._set_verbosity_level1,\n 2: self._set_verbosity_level2\n }\n\n # Set logging level based on verbosity set\n _switch.get(verbosity_level, None)()\n\n return True\n else:\n return False", "def verbosity(self):\n return self._get('verbosity')", "def verbosity(self):\n return self._verbosity", "def __init__(self, verbosity: int = max_verbosity):\n self.verbosity = verbosity", "def _do_option(self, line: str) -> None:\n if line.startswith(\"option verbosity\"):\n self._verbosity = Level(int(line[len(\"option verbosity \") :]))\n _write(\"ok\")\n else:\n _write(\"unsupported\")", "def execute_at_different_verbosity(new_verbosity_threshold, func, *args, **kwargs):\r\n\r\n global verbosity_threshold\r\n old_verbosity_threshold = verbosity_threshold\r\n verbosity_threshold = new_verbosity_threshold\r\n\r\n try:\r\n return_value = func(*args, **kwargs)\r\n return return_value\r\n finally:\r\n verbosity_threshold = old_verbosity_threshold", "def _set_verbosity_level2(self):\n logging.basicConfig( filename=LOGGING_FILE_NAME, \\\n filemode='w', \\\n format='%(asctime)s - %(levelname)s \\t- %(message)s', \\\n level=logging.DEBUG )", "def _do_set_verbose(self, args):\r\n verbose = int(args[1])\r\n self.server.set_verbose(verbose)\r\n return \"%d\" % verbose", "def setVerbose(self, v):\n return self._set(verbose=v)", "def _override_opt(self, new_opt):\n model_args = {\n 'arch',\n 'encoder-embed-dim',\n 'encoder-layers',\n 'decoder-embed-dim',\n 'decoder-layers',\n 'decoder-out-embed-dim',\n 'decoder-attention',\n }\n\n for k, v in new_opt.items():\n if k not in model_args:\n # skip non-model args\n continue\n if k not in self.opt:\n print('Adding new option [ {k}: {v} ]'.format(k=k, v=v))\n elif self.opt[k] != v:\n print('Overriding option [ {k}: {old} => {v}]'.format(\n k=k, old=self.opt[k], v=v))\n self.opt[k] = v\n return self.opt", "def setVerbose(*args,**kwargs):\n verbose = args[0] if args else True\n if verbose:\n verbositySampleTools = 2\n verbosityPlotTools = 2\n verbosityVariableTools = 2\n verbositySelectionTools = 2\n verbosityWJ = 2", "def _ansible_verbose(verbose_level=1):\n flag = ''\n if verbose_level > 1:\n flag = f'-{\"v\" * (verbose_level - 1)}'\n return flag", "def setLevel(newLevel):\n Verbose.__level = max(-1, newLevel)", "def _verbosity_filter(index, verbose):\r\n if not verbose:\r\n return True\r\n elif verbose > 10:\r\n return False\r\n if index == 0:\r\n return False\r\n verbose = .5 * (11 - verbose) ** 2\r\n scale = sqrt(index / verbose)\r\n next_scale = sqrt((index + 1) / verbose)\r\n return (int(next_scale) == int(scale))" ]
[ "0.6408217", "0.6038145", "0.60295814", "0.5947771", "0.5929938", "0.5909877", "0.5623561", "0.5614202", "0.55986595", "0.55324614", "0.55215126", "0.55148864", "0.54819477", "0.5481862", "0.5461689", "0.5380808", "0.53354967", "0.531541", "0.52798617", "0.52721906", "0.5259722", "0.5252118", "0.5242256", "0.52028173", "0.5194103", "0.5169674", "0.51673734", "0.5166024", "0.51612496", "0.51585025" ]
0.74339193
0
Return size of folder at path.
def folder_size(path):\n    # Sum the sizes of the regular files directly inside path.\n    return sum(getsize(join(path, f)) for f in os.listdir(path) if isfile(join(path, f)))
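A quick driver for the folder-size helper above; getsize, isfile and join are assumed to come from os.path, matching the bare names the snippet uses. Note it sums only the regular files directly inside path, not subdirectories.

import os
from os.path import getsize, isfile, join

print(folder_size('.'))      # bytes used by the files directly in the current directory
print(folder_size('/tmp'))   # hypothetical path, shown for illustration only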
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def folder_size(path: str) -> str:\r\n return (\r\n subprocess.check_output([\"du\", \"-sh\", \"-B1\", path]).split()[0].decode(\"utf-8\")\r\n )", "def get_size_from_path(path):\n if not os.path.isdir(path):\n return os.path.getsize(path)\n\n size = 0\n\n for dirname, dirs, files in os.walk(path):\n for f in files:\n size += os.path.getsize(dirname + '/' + f)\n\n return size", "def getsize(self, path):\n return os.path.getsize(path)", "def getsize(path):\n return stat(path).st_size", "def get_folder_size(start_path): # source http://stackoverflow.com/a/1392549\n total_size = 0\n for dirpath, dirnames, filenames in os.walk(start_path):\n for f in filenames:\n fp = os.path.join(dirpath, f)\n total_size += os.path.getsize(fp)\n return total_size", "def size(path):", "def getsize(path):\n return get_instance(path).getsize(path)", "def filesize(self, path):\n arinfo = self._handle.getmember(path)\n return arinfo.size", "def filesize(self, path):\n arinfo = self._handle.getmember(path)\n return arinfo.size", "def get_tree_size(path):\r\n size = 0\r\n try:\r\n for entry in scandir.scandir(path):\r\n if entry.is_dir():\r\n size += get_tree_size(os.path.join(path, entry.name))\r\n else:\r\n size += entry.lstat().st_size\r\n except OSError:\r\n pass\r\n return size", "def get_folder_size(start_path):\r\n total_size = 0\r\n for dirpath, dirnames, filenames in os.walk(start_path):\r\n for f in filenames:\r\n fp = os.path.join(dirpath, f)\r\n total_size += os.path.getsize(fp)\r\n\r\n return total_size / 1000000", "def get_file_size(path):\n\n return os.stat(path).st_size", "def get_file_size(path: str):\n return os.path.getsize(path)", "def get_size(file_path):\n size = 0\n if os.path.isdir(file_path):\n for root, dirs, files in os.walk(file_path):\n for f in files:\n size += os.path.getsize(os.path.join(root, f))\n elif os.path.isfile(file_path):\n size = (os.path.getsize(file_path))\n return size", "def getDirectorySize( self, path ):\n res = self.__checkArgumentFormat( path )\n if not res['OK']:\n return res\n urls = res['Value']\n successful = {}\n failed = {}\n gLogger.debug( \"DIPStorage.isDirectory: Attempting to determine whether %s paths are directories.\" % len( urls ) )\n serviceClient = RPCClient( self.url )\n for url in urls:\n res = serviceClient.getDirectorySize( url )\n if not res['OK']:\n failed[url] = res['Message']\n else:\n successful[url] = {'Files':0, 'Size':res['Value'], 'SubDirs':0}\n resDict = {'Failed':failed, 'Successful':successful}\n return S_OK( resDict )", "def filesize(self, path):\n try:\n return len(self.extract(path, None))\n except Exception as e:\n return 0", "def dir_size(start_path):\n total_size = 0\n for dirpath, dirnames, filenames in os.walk(start_path):\n for f in filenames:\n fp = os.path.join(dirpath, f)\n if os.path.exists(fp):\n try:\n total_size += os.path.getsize(fp)\n except:\n continue\n # convert to MB\n return int(total_size * 1.0 / 10000000)", "def dir_size(dir_path):\n def fsize(path):\n target = path.resolve()\n if target.is_file():\n return target.stat().st_size\n else:\n return 0\n return sum(fsize(child) for child in dir_path.iterdir())", "def get_tree_size(path):\n total = 0\n for entry in os.scandir(path):\n if entry.is_dir(follow_symlinks=False):\n total += get_tree_size(entry.path)\n else:\n total += entry.stat(follow_symlinks=False).st_size\n return total", "def filesize(self, path):\n return self._handle.getinfo(path).file_size", "def filesize(self, path):\n return self._handle.getinfo(path).file_size", "def get_size(path):\n return 
str(os.path.getsize(path)/1024) + 'kb'", "def get_size(start_path='.'):\n total_size = 0\n for dirpath, dirnames, filenames in os.walk(start_path):\n for f in filenames:\n fp = os.path.join(dirpath, f)\n total_size += os.path.getsize(fp)\n return total_size", "def get_size_directory(path):\n command = [\"du\", path, '-s']\n s = subprocess.run(command, capture_output=True)\n output = s.stdout.decode('UTF-8')\n if len(output) != 0:\n kbytes = int(output.split('\\t')[0])\n else:\n kbytes = -1\n return kbytes", "def _disk_usage(path: pathlib.Path):\n if path.is_file():\n return path.stat().st_size\n elif path.is_dir():\n size_bytes = 0\n for file in path.iterdir():\n size_bytes += _disk_usage(file)\n return size_bytes\n else:\n raise NotImplementedError(\"What filetype is {file}?\")", "def countBytes(path):\r\n\tcount = 0 \r\n\tlyst = os.listdir(path)\r\n\tfor element in lyst:\r\n\t\tif os.path.isfile(element):\r\n\t\t\tcount += os.path.getsize(element)\r\n\t\telse:\r\n\t\t\tos.chdir(element)\r\n\t\t\tcount += countBytes(os.getcwd())\r\n\t\t\tos.chdir(\"..\")\r\n\treturn count", "def dirsize(self):\n total = 0\n for p in self.select_file(recursive=True):\n try:\n total += p.size\n except: # pragma: no cover\n print(\"Unable to get file size of: %s\" % p)\n return total", "def getFolderSize(folder):\n\n total_size = os.path.getsize(folder)\n for item in os.listdir(folder):\n itempath = os.path.join(folder, item)\n if os.path.isfile(itempath):\n total_size += os.path.getsize(itempath)\n elif os.path.isdir(itempath):\n total_size += getFolderSize(itempath)\n return total_size", "def file_size(file_path):\n \n stat = os.stat(file_path)\n assert stat_fn.S_ISDIR(stat.st_mode) == False\n return stat.st_size", "def get_dirsize(start_path):\n total_size = 0\n for dirpath, dirnames, filenames in os.walk(start_path):\n for f in filenames:\n fp = os.path.join(dirpath, f)\n total_size += os.path.getsize(fp)\n return total_size" ]
[ "0.8283314", "0.8165242", "0.80046517", "0.78010786", "0.77946424", "0.7767517", "0.77278864", "0.77143127", "0.77143127", "0.769937", "0.7691966", "0.7543926", "0.7538644", "0.7486155", "0.7480786", "0.7466756", "0.74297", "0.74095184", "0.73598933", "0.7322922", "0.7322922", "0.7294346", "0.7293894", "0.72788", "0.72730577", "0.7271591", "0.72666967", "0.72371125", "0.7222061", "0.722148" ]
0.8572386
0
Print out debugging information. string: the string to be printed (in).
def debug(string):\n    if verbose:\n        print(string)\n    return
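A minimal driver for the debug helper above; verbose is a module-level flag the snippet reads but never defines, so it is set explicitly here.

verbose = True
debug('loading configuration')  # printed
verbose = False
debug('cache miss')             # suppressed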
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def debug(string):\n if conf.DEBUG:\n outputs.print_debug(string)", "def output_debug_info(self):", "def debugPrint(text: str):\r\n if DEBUG:\r\n print(text)", "def debug_string(self):\n\n raise NotImplementedError", "def debug():", "def debug_print(text):\r\n if settings.debug:\r\n print (text)", "def debug(msg):", "def debugprint(debugobject, debugstring):\n if CMDLINEARGS.debug:\n print \"===== \" + debugstring + \" =====\"\n pprint.pprint(debugobject)\n print \"===== \" + debugstring + \" =====\"\n print \"\"", "def debugprint(debugobject, debugstring):\n if CMDLINEARGS.debug:\n print \"===== \" + debugstring + \" =====\"\n pprint.pprint(debugobject)\n print \"===== \" + debugstring + \" =====\"\n print \"\"", "def debug(self, input):\n # Pass the debug information that you may think is important for your\n # evaluators\n debug_info = 'debug info'\n return debug_info", "def debug(self, input):\n # Pass the debug information that you may think is important for your\n # evaluators\n debug_info = 'debug info'\n return debug_info", "def print_debug(message: str):\n global debug\n if debug:\n print(\"%s%s%s\" % (KCYN, message, KNRM))", "def debug_print(debug_data):\n if DEBUG_MODE == \"true\":\n pp.pprint(debug_data)", "def __debugInfo(self, msg):\n\t\tif self.verbosity:\n\t\t\tprint(stylize(\"[*] DEBUG: {}\".format(msg), colored.fg(\"wheat_1\")))", "def debug(s):\n if app.config['DEBUG']:\n print(s)", "def debug(s):\n if app.config['DEBUG']:\n print(s)", "def debug(self, message):\r\n pass", "def debug(debug_string, to_debug):\n if to_debug:\n print(\"DEBUG {0}: {1}\".format(strftime('%H:%M:%S'), debug_string))", "def print_debug(msg):\n if IS_DEBUG:\n print(msg)", "def Debug(DebugStr):\n\tsys.stderr.write(DebugStr + '\\n')", "def _debuginfo(self,suspect,message):\n suspect.debug(message)\n self.logger.debug(message)", "def print_debug(context: str = \"\") -> None:\r\n print(context)\r\n print(\"This is the current board\")\r\n print(example)\r\n print(\"This is the conflict space\")\r\n print(conflict_space)\r\n print(\"This is the safeboard\")\r\n print(safeboard)", "def _debug_print(message):\n\n if _debug == True:\n print(message)", "def debug(node):\n print \"%r\" % node", "def output_debug(text):\n if conf.debug:\n output_message('[DEBUG] ' + text)", "def debug(msg):\n if settings.DEBUG:\n print \"DEBUG: cli.%(msg)s\" % locals()", "def debug(self, msg):\n if self._debug:\n print \"%s\" % (msg)", "def debug_print(input_data, debug_flag):\n if debug_flag:\n if input_data:\n #print(\"################################################ debug_print #############################################################\")\n for item in input_data:\n print(\" {0:<60}\".format(item))\n #print(\"##############################################################################################################################\")\n else:\n print(\" {0:<60}\".format(input_data))", "def debug(self, s, level=1):\n if self._debug >= level:\n print(s)", "def pr(string, verbose):\n if(verbose):\n print(string)" ]
[ "0.76916933", "0.7513169", "0.7507592", "0.73979014", "0.73887455", "0.7374867", "0.7366604", "0.7354347", "0.7354347", "0.7234283", "0.7234283", "0.71581537", "0.70975", "0.7073775", "0.7070248", "0.7070248", "0.7016923", "0.6961993", "0.6890409", "0.68704146", "0.6854564", "0.6853472", "0.6843488", "0.6821009", "0.6814725", "0.6805802", "0.67897284", "0.6760925", "0.67602783", "0.6747818" ]
0.7761138
0