query (string, lengths 9-9.05k) | document (string, lengths 10-222k) | metadata (dict) | negatives (sequence, length 30) | negative_scores (sequence, length 30) | document_score (string, lengths 4-10) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Test in filter on a choice field using an enum (Reporter.reporter_type). | def test_enum_in_filter(query):
Reporter.objects.create(
first_name="John", last_name="Doe", email="[email protected]", reporter_type=1
)
Reporter.objects.create(
first_name="Jean", last_name="Bon", email="[email protected]", reporter_type=2
)
Reporter.objects.create(
first_name="Jane", last_name="Doe", email="[email protected]", reporter_type=2
)
Reporter.objects.create(
first_name="Jack", last_name="Black", email="[email protected]", reporter_type=None
)
schema = Schema(query=query)
query = """
query {
reporters (reporterType_In: [A_1]) {
edges {
node {
email
}
}
}
}
"""
result = schema.execute(query)
assert not result.errors
assert result.data["reporters"]["edges"] == [
{"node": {"email": "[email protected]"}},
]
query = """
query {
reporters (reporterType_In: [A_2]) {
edges {
node {
email
}
}
}
}
"""
result = schema.execute(query)
assert not result.errors
assert result.data["reporters"]["edges"] == [
{"node": {"email": "[email protected]"}},
{"node": {"email": "[email protected]"}},
]
query = """
query {
reporters (reporterType_In: [A_2, A_1]) {
edges {
node {
email
}
}
}
}
"""
result = schema.execute(query)
assert not result.errors
assert result.data["reporters"]["edges"] == [
{"node": {"email": "[email protected]"}},
{"node": {"email": "[email protected]"}},
{"node": {"email": "[email protected]"}},
] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_choice_in_filter_without_enum(query):\n\n john_doe = Reporter.objects.create(\n first_name=\"John\", last_name=\"Doe\", email=\"[email protected]\"\n )\n jean_bon = Reporter.objects.create(\n first_name=\"Jean\", last_name=\"Bon\", email=\"[email protected]\"\n )\n documentary_film = Film.objects.create(genre=\"do\")\n documentary_film.reporters.add(john_doe)\n action_film = Film.objects.create(genre=\"ac\")\n action_film.reporters.add(john_doe)\n other_film = Film.objects.create(genre=\"ot\")\n other_film.reporters.add(john_doe)\n other_film.reporters.add(jean_bon)\n\n schema = Schema(query=query)\n\n query = \"\"\"\n query {\n films (genre_In: [\"do\", \"ac\"]) {\n edges {\n node {\n genre\n reporters {\n edges {\n node {\n lastName\n }\n }\n }\n }\n }\n }\n }\n \"\"\"\n result = schema.execute(query)\n assert not result.errors\n assert result.data[\"films\"][\"edges\"] == [\n {\n \"node\": {\n \"genre\": \"do\",\n \"reporters\": {\"edges\": [{\"node\": {\"lastName\": \"Doe\"}}]},\n }\n },\n {\n \"node\": {\n \"genre\": \"ac\",\n \"reporters\": {\"edges\": [{\"node\": {\"lastName\": \"Doe\"}}]},\n }\n },\n ]",
"def test_radioselect_field():",
"def test_select_field():",
"def report_type_choices():\n\n rts = report_types()\n rcs = report_categories()\n return [(c, [(rt.report_type, rt.name) for rt in rts if rt.category == c]) for c in rcs]",
"def test_validate_choices_ok(self, choices, value):\n opt = scheme.Option('test-option', choices=choices)\n opt.validate('foo', value)",
"def test_validate_type_ok(self, field_type, value):\n opt = scheme.Option('test-option', field_type=field_type)\n opt.validate('foo', value)",
"def test_should_choice_convert_string():\n assert_conversion(forms.ChoiceField, Int)",
"def test_choices_from_facets(self):\n fake_facets = {\n \"doctype\": {\"foo\": 1, \"bar\": 2, \"baz\": 3},\n \"has_transcription\": {\"true\": 3, \"false\": 3},\n }\n form = DocumentSearchForm()\n # call the method to configure choices based on facets\n form.set_choices_from_facets(fake_facets)\n # test doctype facets (FacetChoiceField)\n for choice in form.fields[\"doctype\"].widget.choices:\n # choice is index id, label\n choice_label = choice[1]\n assert isinstance(choice_label, str)\n assert \"<span>\" in choice_label\n # test has_transcription facet (BooleanFacetField)\n bool_label = form.fields[\"has_transcription\"].label\n assert isinstance(bool_label, str)\n assert \"3</span>\" in bool_label",
"def test_lookup_with_dynamic_value(self):\n modeladmin = DepartmentFilterDynamicValueBookAdmin(Book, site)\n\n def _test_choices(request, expected_displays):\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n filterspec = changelist.get_filters(request)[0][0]\n self.assertEqual(filterspec.title, \"publication decade\")\n choices = tuple(c[\"display\"] for c in filterspec.choices(changelist))\n self.assertEqual(choices, expected_displays)\n\n _test_choices(\n self.request_factory.get(\"/\", {}), (\"All\", \"the 1980's\", \"the 1990's\")\n )\n\n _test_choices(\n self.request_factory.get(\"/\", {\"publication-decade\": \"the 80s\"}),\n (\"All\", \"the 1990's\"),\n )\n\n _test_choices(\n self.request_factory.get(\"/\", {\"publication-decade\": \"the 90s\"}),\n (\"All\", \"the 1980's\"),\n )",
"def filter_generation_type(self, what):\n return self.form.set_value('generation type', what)",
"def test_request_report_enums_accepted(\n self, api_instance: Reports, report_type, marketplace_id\n ):\n params = api_instance.request_report(\n report_type=report_type,\n marketplace_ids=marketplace_id,\n )\n assert params[\"ReportType\"] == \"_GET_FLAT_FILE_OPEN_LISTINGS_DATA_\"\n assert params[\"MarketplaceIdList.Id.1\"] == \"ATVPDKIKX0DER\"",
"def __test_choice(L, S, A, R, Y):\n return choice(Y)",
"def test_award_type_filter():\n baker.make(\n RecipientProfile,\n recipient_level=\"A\",\n recipient_hash=\"00077a9a-5a70-8919-fd19-330762af6b84\",\n recipient_unique_id=\"000000123\",\n recipient_name=\"SHOES AND SOCKS INC.\",\n last_12_months=2400.00,\n last_12_contracts=400.00,\n last_12_grants=500.00,\n last_12_loans=0.00,\n last_12_other=700.00,\n last_12_direct_payments=800.00,\n award_types=[\"contract\", \"grant\", \"direct payment\", \"other\"],\n )\n baker.make(\n RecipientProfile,\n recipient_level=\"B\",\n recipient_hash=\"c8f79139-38b2-3063-b039-d48172abc710\",\n recipient_unique_id=\"000000444\",\n recipient_name=\"SPORT SHORTS\",\n last_12_months=2000.00,\n last_12_contracts=700.00,\n last_12_grants=600.00,\n last_12_loans=0.00,\n last_12_other=400.00,\n last_12_direct_payments=300.00,\n award_types=[\"contract\", \"grant\", \"direct payment\", \"other\"],\n )\n baker.make(\n RecipientProfile,\n recipient_level=\"C\",\n recipient_hash=\"5770e860-0f7b-69f1-182f-4d6966ebaa62\",\n recipient_unique_id=\"000000555\",\n recipient_name=\"JUST JERSEYS\",\n last_12_months=99.99,\n last_12_contracts=0.00,\n last_12_grants=0.00,\n last_12_loans=99.99,\n last_12_other=0.00,\n last_12_direct_payments=0.00,\n award_types=[\"loans\"],\n )\n\n filters = {\"limit\": 10, \"page\": 1, \"order\": \"desc\", \"sort\": \"amount\", \"award_type\": \"all\"}\n results, meta = get_recipients(filters=filters)\n\n # \"all\"\n assert len(results) == 3\n assert results[0][\"recipient_level\"] == \"A\"\n assert float(results[0][\"amount\"]) == float(2400)\n assert results[0][\"id\"] == \"00077a9a-5a70-8919-fd19-330762af6b84-A\"\n\n # Test \"grants\"\n filters[\"award_type\"] = \"grants\"\n results, meta = get_recipients(filters=filters)\n assert len(results) == 2\n assert results[0][\"recipient_level\"] == \"B\"\n assert float(results[0][\"amount\"]) == float(600)\n assert results[0][\"id\"] == \"c8f79139-38b2-3063-b039-d48172abc710-B\"\n\n # Test \"contracts\"\n filters[\"award_type\"] = \"contracts\"\n results, meta = get_recipients(filters=filters)\n assert len(results) == 2\n assert results[0][\"recipient_level\"] == \"B\"\n assert float(results[0][\"amount\"]) == float(700)\n assert results[0][\"id\"] == \"c8f79139-38b2-3063-b039-d48172abc710-B\"\n\n # Test \"direct_payments\"\n filters[\"award_type\"] = \"direct_payments\"\n results, meta = get_recipients(filters=filters)\n assert len(results) == 2\n assert results[0][\"recipient_level\"] == \"A\"\n assert float(results[0][\"amount\"]) == float(800)\n assert results[0][\"id\"] == \"00077a9a-5a70-8919-fd19-330762af6b84-A\"\n\n # Test \"loans\"\n filters[\"award_type\"] = \"loans\"\n results, meta = get_recipients(filters=filters)\n assert len(results) == 1\n assert results[0][\"recipient_level\"] == \"C\"\n assert float(results[0][\"amount\"]) == float(99.99)\n assert results[0][\"id\"] == \"5770e860-0f7b-69f1-182f-4d6966ebaa62-C\"",
"def test_api_type_filtering(api_client, by_type, by_state):\n response = api_client.get(path='/breweries', params={'by_type': by_type, 'by_state': by_state})\n assert response.json() != []\n assert response.ok",
"def choices(self, cl):\n # TODO: Determine if non-static choices would be cleaner here.\n # Honestly, I tried a more generic version and it was even harder to\n # follow than this version.\n yield {\n 'selected': not (self.lookup_val_gte or self.lookup_val_lt),\n 'query_string': cl.get_query_string({}, [self.lookup_kwarg_gte,\n self.lookup_kwarg_lt]),\n 'display': 'All'\n }\n\n goal = settings.FACEBOOK_CLICK_GOAL\n yield {\n 'selected': self.lookup_val_gte and not self.lookup_val_lt,\n 'query_string': cl.get_query_string({self.lookup_kwarg_gte: goal},\n [self.lookup_kwarg_lt]),\n 'display': 'Yes'\n }\n yield {\n 'selected': self.lookup_val_lt and not self.lookup_val_gte,\n 'query_string': cl.get_query_string({self.lookup_kwarg_lt: goal},\n [self.lookup_kwarg_gte]),\n 'display': 'No'\n }",
"def test_lookup_with_non_string_value(self):\n modeladmin = DepartmentFilterEmployeeAdmin(Employee, site)\n request = self.request_factory.get(\"/\", {\"department\": self.john.department.pk})\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n\n queryset = changelist.get_queryset(request)\n\n self.assertEqual(list(queryset), [self.john])\n\n filterspec = changelist.get_filters(request)[0][-1]\n self.assertEqual(filterspec.title, \"department\")\n choices = list(filterspec.choices(changelist))\n self.assertEqual(choices[1][\"display\"], \"DEV\")\n self.assertIs(choices[1][\"selected\"], True)\n self.assertEqual(\n choices[1][\"query_string\"], \"?department=%s\" % self.john.department.pk\n )",
"def test_validate_available_choice_1(self):\n self.assertRaises(\n InvalidStatusOperationError,\n validate_available_choice,\n *(BeerStyle, \"Not an int\")\n )",
"def validate_available_choice(enum, to_value):\n if to_value is not None and not to_value in dict(enum.choices()).keys():\n raise InvalidStatusOperationError(_(u'Select a valid choice. %(value)s is not one of the available choices.') % {'value': to_value})",
"def getRecordFilter(self):\n return Q(**{\"report__report_type\": self.value})",
"def event_sub_cond_field_choices(self): \n event_fields = [\n 'no filters',\n 'content_completion_pct',\n 'component__left_nav_section',\n 'component__utility_tile__id',\n 'dest_page__category_slug',\n 'content_id',\n 'program_id',\n 'page_type',\n 'dest_page_type',\n 'container_id',\n 'container_slug',\n 'query',\n 'manip',\n 'auth_type',\n 'current_auth_type',\n 'status',\n 'dialog_type'\n ]\n return event_fields",
"def occurrence_choices():\n OCCURRENCE_CHOICES = [('one_off', 'One Off'), ('weekly', 'Weekly'), ('fortnightly', 'Fortnightly')]\n occurrence = forms.ChoiceField(choices=OCCURRENCE_CHOICES, widget=forms.RadioSelect())\n return occurrence",
"def __ui_choose_search_criteria_for_activities(self):\n print(\"By which criteria do you want to search activities?\\n\"\n \" 1. By date\\n\"\n \" 2. By description\\n\")\n user_choice = input(\"Type your option: \").strip()\n if user_choice == \"1\":\n self.__ui_search_activities_by_date()\n elif user_choice == \"2\":\n self.__ui_search_activities_by_description()\n else:\n print(\"Invalid option!\\n\")\n return",
"def handle_choice(self, choice):\n if choice not in self.choices:\n error = \"Invalid Choice please choose from {}\".format(\n \", \".join(self.choices)\n )\n return error\n return choice",
"def test_validate_available_choice_2(self):\n self.assertRaises(\n InvalidStatusOperationError,\n validate_available_choice,\n BeerStyle,\n str(BeerStyle.LAGER.value),\n )",
"def formfield_for_choice_field(self, db_field, request=None, **kwargs):\n if request:\n choices_list = STATUS_CHOICES\n if getattr(settings, 'NEWS_APPROVAL_SYSTEM', False) and not request.user.has_perm('news.can_approve_articles'):\n choices_list = [x for x in STATUS_CHOICES if x[0] != 'approved']\n\n if db_field.name == 'status':\n kwargs['choices'] = choices_list\n\n return super(ArticleAdmin, self).formfield_for_choice_field(db_field, request, **kwargs)",
"def test_test_enum_parameters(self):\n pass",
"def test_single_choice_question_possibilities(self):\n my_step = WorkflowStep.objects.get(\n workflow__workflowcollectionmember__workflow_collection=self.my_collection\n )\n my_step_input = WorkflowStepUserInput.objects.get(workflow_step=my_step)\n my_step_input.type.name = \"single_choice_question\"\n my_step_input.type.save()\n # Ensure if correct answer is required, that the enum is just the correct answer\n self.assertEquals(\n my_step_input.response_schema[\"properties\"][\"userInput\"][\"anyOf\"],\n [{\"type\": \"string\"}, {\"type\": \"number\"}],\n )\n self.assertEqual(\n my_step_input.response_schema[\"properties\"][\"userInput\"][\"enum\"], [\"Red\"]\n )\n\n # Change the example so that the correct answer isn't required\n my_step_input.specification = {\n \"label\": \"What is your favorite color?\",\n \"inputOptions\": [\"Red\", \"Blue\"],\n \"correctInput\": \"Red\",\n \"meta\": {\"inputRequired\": True, \"correctInputRequired\": False},\n }\n my_step_input.save()\n\n # Ensure that if the correct answer isn't required, we have all options in the enum\n self.assertEquals(\n my_step_input.response_schema[\"properties\"][\"userInput\"][\"anyOf\"],\n [{\"type\": \"string\"}, {\"type\": \"number\"}],\n )\n self.assertEqual(\n my_step_input.response_schema[\"properties\"][\"userInput\"][\"enum\"],\n [\"Red\", \"Blue\"],\n )\n\n # Change the example so that neither input nor correct input is required\n my_step_input.specification = {\n \"label\": \"What is your favorite color?\",\n \"inputOptions\": [\"Red\", \"Blue\"],\n \"correctInput\": \"Red\",\n \"meta\": {\"inputRequired\": False, \"correctInputRequired\": False},\n }\n my_step_input.save()\n\n # Ensure that if neither are required, we have all options in the enum + None\n self.assertEquals(\n my_step_input.response_schema[\"properties\"][\"userInput\"][\"anyOf\"],\n [{\"type\": \"string\"}, {\"type\": \"number\"}, {\"type\": \"null\"}],\n )\n self.assertEqual(\n my_step_input.response_schema[\"properties\"][\"userInput\"][\"enum\"],\n [\"Red\", \"Blue\", None],\n )",
"def test_choices(Class, choices_in, choices_out):\n attribute = Class(\"test\", choices=choices_in)\n assert attribute.choices == choices_out",
"def test_filter_on_subtype(authenticated_client):\n observable_json = {'value': r'asd[0-4]\\.com'}\n rv = authenticated_client.post('/api/observables/filter/',\n data=json.dumps(observable_json),\n content_type='application/json')\n response = json.loads(rv.data)\n for item in response:\n assert re.match(r'asd[0-4]\\.com', item['value'])\n assert item['type'] == 'domain-name'",
"def _find_option_with_choice(self, inputfield, choice):\r\n for option in self._find_options(inputfield):\r\n if option['choice'] == choice:\r\n return option"
] | [
"0.6124881",
"0.5575385",
"0.54288995",
"0.54145145",
"0.5324439",
"0.53120065",
"0.53093195",
"0.52942735",
"0.52369535",
"0.5213601",
"0.5205873",
"0.5193913",
"0.5182427",
"0.5172175",
"0.51323706",
"0.51200104",
"0.5100281",
"0.50874496",
"0.50767154",
"0.50623184",
"0.5051968",
"0.5049539",
"0.5044699",
"0.50376415",
"0.50349313",
"0.5027055",
"0.4976063",
"0.49618006",
"0.49606064",
"0.49531198"
] | 0.56262904 | 1 |
Handle GET requests to park ProductCategorys resource | def list(self, request):
product_category = ProductCategory.objects.all()
# Support filtering ProductCategorys by area id
# name = self.request.query_params.get('name', None)
# if name is not None:
# ProductCategories = ProductCategories.filter(name=name)
serializer = ProductCategorySerializer(
product_category, many=True, context={'request': request})
return Response(serializer.data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def retrieve(self, request, pk=None):\n try:\n category = ProductCategory.objects.get(pk=pk)\n serializer = ProductCategorySerializer(category, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)",
"def categories(request, pk):\n if Category.objects.filter(item_type_id=pk).exists(): # Checks if product_category exists with given id.\n all_categories = Category.objects.filter(item_type_id=pk)\n else:\n return Response(messages.CATEGORY_ITEM_DOES_NOT_EXIST, status=status.HTTP_404_NOT_FOUND)\n if request.method == 'GET':\n category_serializer = CategorySerializer(all_categories, many=True)\n\n return Response(category_serializer.data[::-1], status=status.HTTP_200_OK)",
"def rest_get_catalogue_handler():\n cats = category.get_all_categories()\n items = item.get_all_items()\n result = {}\n result['categories'] = [c.serialize for c in cats]\n result['items'] = [i.serialize for i in items]\n return jsonify(result)",
"def item_categories(request):\n all_item_categories = ItemType.objects.all() # Get all product_categories\n if request.method == 'GET':\n if all_item_categories:\n item_category_serializer = ItemCategorySerializer(all_item_categories, many=True)\n return Response(item_category_serializer.data, status=status.HTTP_200_OK)\n else:\n return Response(messages.EMPTY_PRODUCT_CATEGORIES, status=status.HTTP_204_NO_CONTENT)",
"def browse_category(request):\n\n result = {'categories':[], 'products':[]}\n\n u = request.user\n\n page = request.POST.get('page', 1)\n\n cat = Category.objects.get(id=request.POST['cat_id'])\n if cat.children.count() > 0:\n result['categories'] = [c.get_json() for c in cat.children.all()]\n else:\n # display items\n result = Product.objects.filter_category(cat.category_id, page, u) \n\n return JSONHttpResponse(result)",
"def test_list_products_filtered_by_category(self):\n self._require_login(self.user1)\n response = self.client.get('/api/1.0/products/?category=1')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.__len__(), 1)\n self.assertEqual(response.data[0]['name'], 'Producto 2')\n self.assertEqual(response.data[0]['description'], 'Descripcion producto 2')",
"def category_list(request):\n if request.method == 'GET':\n categories = get_list_or_404(Category, is_active=True)\n if request.GET.get('pagination'):\n pagination = request.GET.get('pagination')\n if pagination == 'true':\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(categories, request)\n serializer = CategorySerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)\n else:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n else:\n serializer = CategorySerializer(categories, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)",
"def retrieve(self, request, pk=None):\n try:\n category = Categories.objects.get(pk=pk)\n serializer = CategoriesSerializer(category, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)",
"def get_all_categories_of_product(key):\n try:\n product = Products.objects.get(pk=key, is_delete=False)\n except ObjectDoesNotExist:\n return Response({'status': PRODUCT_NOT_FOUND}, status=status.HTTP_404_NOT_FOUND)\n\n if product.category_ids.all().exists():\n ser = CategoriesSerializer(product.category_ids.all(), many=True)\n return Response(ser.data, status=status.HTTP_200_OK)\n return Response({'status': PRODUCT_NOT_FOR_SUBSCRIPTION}, status=status.HTTP_200_OK)",
"def get_categories(request):\n return JsonResponse(get_all_objects(Category, CategorySerializer), safe=False)",
"def get(self, request, format=None):\n categories = Category.objects.all()\n data_out = CategorySerializer(categories, many=True)\n return Response(data_out.data)",
"def all_products(request):\n products = Product.objects.all()\n query = None\n categories = None\n sort= None\n direction = None\n\n if request.GET:\n\n # if 'sort' in request.GET:\n # sortkey = request.GET['sort']\n # if sortkey == \"price\":\n # products = products.order_by('-price').reverse()\n\n # if 'sort' in request.GET:\n # sortkey = request.GET['sort']\n # if sortkey == \"rating\":\n # products = products.order_by('-rating')\n\n if 'sort' in request.GET:\n sortkey = request.GET['sort']\n sort = sortkey\n if sortkey == 'name':\n sortkey = 'lower_name'\n products = products.annotate(lower_name=Lower('name'))\n if sortkey == 'category':\n sortkey = 'category__name'\n if 'direction' in request.GET:\n direction = request.GET['direction']\n if direction == 'desc':\n sortkey = f'-{sortkey}'\n products = products.order_by(sortkey)\n\n if 'category' in request.GET:\n # Returns a list\n # categories = list(request.GET['category'].split(','))\n # Convert into list to compare below\n categories = request.GET['category'].split(',')\n # __name__: Looking for name field in category model since related by foreign key\n # category is present in products field but with number reference, this method allows us to use the actual name\n # instead of number by referencing the category model using foreign key in models.\n # using filter since object already queried\n # category_name obtained from foreignkey defined in Product model/lookups that span relationships\n # Obtaining query set for html(category__name: double undrscore since refering to foeignkey)\n # https://docs.djangoproject.com/en/3.1/topics/db/queries/#lookups-that-span-relationships\n # The __in refers to list. Returns all products with categories in category list as queryset\n # https://docs.djangoproject.com/en/3.1/topics/db/queries/#the-pk-lookup-shortcut\n products = products.filter(category__name__in=categories)\n # Get all categories where name in catgories list as a queryset\n categories = Category.objects.filter(name__in=categories)\n\n if 'q' in request.GET:\n query = request.GET['q']\n # Query is blank query= \"\"\n if not query:\n messages.error(request, \"You didn't enter any search criteria!\")\n return redirect(reverse('products'))\n else:\n queries = Q(name__icontains=query) | Q(description__icontains=query)\n products = products.filter(queries)\n\n current_sorting = f'{sort}_{direction}'\n\n context = {\n 'products': products,\n 'search_term': query,\n 'current_categories': categories,\n 'current_sorting': current_sorting,\n }\n return render(request, 'products/products.html', context)",
"def getproductbycategory(self):\n\n eight = self.geteightcategories()\n productsliste = []\n for category in eight:\n for page in tqdm(range(1, 5)):\n r2 = requests.get(\n \"https://fr.openfoodfacts.org/cgi/search.pl?action=process\"\n + \"&tagtype_0=categories&tag_contains_0=contains&tag_0={}\".format(\n category\n )\n + \"&tag_contains_1=contains&tag_1=france&page_size=500\"\n + \"&fields=url,categories_tags_fr,product_name,stores_tags\"\n + \",nutriscore_grade&tagtype_1=purchase_places&sort_by=\"\n + \"unique_scans_n&json=1&page={}\".format(page)\n )\n dataproducts = r2.json()\n if dataproducts[\"page_count\"] is None:\n continue\n else:\n for items in dataproducts[\"products\"]:\n productsliste.append(items)\n\n self.rawproductdata = productsliste",
"def test_query_product_list_by_category(self):\n products = self._create_products(10)\n test_category = products[0].category\n category_products = [product for product in products if product.category == test_category]\n resp = self.app.get(\n \"/products\", query_string=\"category={}\".format(quote_plus(test_category))\n )\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(len(data), len(category_products))\n # check the data just to be sure\n for product in data:\n self.assertEqual(product[\"category\"], test_category)",
"def _scratch_category(self, cat, n_page):\n payload = {\n \"action\": \"process\",\n \"tagtype_0\": \"categories\",\n \"tag_contains_0\": \"contains\",\n \"tag_0\": cat,\n \"tagtype_1\": \"nutrition_grade\",\n \"tag_contains_1\": \"contains\",\n \"fields\": \",\".join(PRODUCTS_FIELDS),\n \"page_size\": PAGE_SIZE,\n \"page\": n_page,\n \"json\": \"true\",\n }\n\n al_list = get(OFF_URL, params=payload).json()\n return al_list[\"products\"]",
"def getProducts():\n return \"http://api.tcgplayer.com/catalog/products\"",
"def handle(self, *args, **options):\r\n for category in APIInformation.CATEGORIES:\r\n self.get_categories(category)\r\n APIInformation.PARAMETERS['tag_0'] = category\r\n for tag in APIInformation.NUTRISCORE:\r\n APIInformation.PARAMETERS['tag_1'] = tag\r\n response = requests.get(APIInformation.PRODUCTS_LINK,\r\n params=APIInformation.PARAMETERS)\r\n products = response.json()\r\n\r\n self.get_products(products, category)",
"def category():\n kwargs = {k: parse(v) for k, v in request.args.to_dict().items()}\n return jsonify(objects=get_categories(**kwargs))",
"def items(request, pk):\n\n if Item.objects.filter(category_id=pk).exists(): # Checks if product_category exists with given id.\n all_items = Item.objects.filter(category_id=pk)\n else:\n return Response(messages.ITEMS_DOES_NOT_EXIST, status=status.HTTP_404_NOT_FOUND)\n if request.method == 'GET':\n item_serializer = ItemSerializer(all_items, many=True)\n return Response(item_serializer.data[::-1], status=status.HTTP_200_OK)",
"def get_product(self, category) -> list:\n try:\n page = 1\n list_list_product = []\n while True:\n query = requests.get(\n f\"https://fr.openfoodfacts.org/category/{category}.json?page={page}\"\n ).json()\n if int(query[\"count\"]) == 0:\n logger.info(\n f\"Aucune données dans l'API pour la catégory {category}\"\n )\n return list_list_product\n\n for product in query[\"products\"]:\n product_list = {}\n if (\n product.get(\"product_name_fr\")\n and product.get(\"image_url\")\n and product.get(\"stores\")\n and product.get(\"url\")\n and product.get(\"nutriscore_grade\")\n and product.get(\"image_nutrition_url\")\n and product.get(\"categories\")\n ):\n product_list[\"name\"] = product.get(\"product_name_fr\").strip()\n\n product_list[\"image_product\"] = product.get(\"image_url\")\n\n product_list[\"stores\"] = product.get(\"stores\", \"\").strip()\n\n product_list[\"url\"] = product.get(\"url\", \"\").strip()\n\n product_list[\"nutriscore\"] = product.get(\"nutriscore_grade\")\n\n product_list[\"image_reperes_nutrionnels\"] = product.get(\n \"image_nutrition_url\"\n )\n\n product_list[\"categories_product\"] = product.get(\n \"categories\", \"\"\n ).split(\",\")\n\n list_list_product.append(product_list)\n page += 1\n\n if query[\"page_count\"] == page:\n return list_list_product\n\n except AttributeError as e:\n logger.info(\"stop\")\n logger.error(\"Une erreur c'est produite pendant la requete GET\")\n logger.error(e)\n sys.exit()",
"def category(request, category_id, template_name='doppler/shift/catalog/category.haml'):\n category = get_object_or_404(Category, pk=category_id, enabled=True)\n products = category.enabled_products\n subcategories = category.children.filter(enabled=True)\n return render_to_response(\n template_name,\n {\n 'category': category,\n 'products': products,\n 'subcategories': subcategories,\n },\n context_instance=RequestContext(request))",
"def product_list(request):\n if request.method == 'GET':\n _products = Product.objects.all()\n serializer = ProductSerializer(_products, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)",
"def get_shop_products(request, slug, cat):\n try:\n shop = Shop.objects.get(slug=slug)\n products = Products.objects.filter(shop_rel=shop).order_by('?')\n shop_slugs = list(map(lambda x: x[\"slug\"], shop.categories))\n if not shop.categories:\n products = []\n elif cat in shop_slugs:\n products = products.filter(genre__slug=cat)\n paginator = pagination.PageNumberPagination()\n paginator.page_size = 12\n result_page = paginator.paginate_queryset(products, request=request)\n serializer = ProductSerializer(result_page, many=True)\n return paginator.get_paginated_response(serializer.data)\n except shop.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)",
"def retrieve(self, request, pk=None):\n try:\n category = ItemCategory.objects.get(pk=pk)\n serializer = ItemCategorySerializer(category, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)",
"def _get_data(self, category):\n url = self._get_url(category)\n # request page\n page = get(url)\n # print page.content\n # exit()\n # raise an error\n # page.raise_for_status()\n # parse html markup\n html = html5parser.fragment_fromstring(page.content, True)\n divs = html.findall(\".//{http://www.w3.org/1999/xhtml}div\")\n json_string = \"\"\n # find the div that contains our catalog json file\n # TODO: everything request seems to have diffent html structure then specials find a method that works for both of them\n for i in range(0, len(divs)):\n d = divs[i]\n if d.text != None and \"COLRSCatalogEntryList\" in d.text:\n json_string = d.text\n break\n # parse the json\n # print \"json string is:\\n{}\".format(json_string)\n data_json = json.loads(json_string)\n self._search_data = data_json['searchInfo']\n self._product_data = data_json['products']\n # we return true on success\n return True",
"def get_products(self, data, category):\r\n for product_information in data['products']:\r\n name = product_information.get('product_name', None)\r\n # in order to remove linebreak from product name\r\n # print(\"WITH LINEBREAK : \", repr(name))\r\n if name:\r\n name = name.replace('\\n', '')\r\n # print(\"WITHOUT LINEBREAK : \", repr(name))\r\n category = Categories.objects.get(name=category)\r\n nutriscore = product_information.get('nutrition_grades', None)\r\n link = product_information.get('url', None)\r\n image = product_information.get('image_url', None)\r\n nutrition_image = product_information.get\\\r\n ('image_nutrition_url', None)\r\n if category is None \\\r\n or name is None \\\r\n or len(name) > 75 \\\r\n or nutriscore is None \\\r\n or link is None \\\r\n or image is None \\\r\n or nutrition_image is None:\r\n continue\r\n else:\r\n try:\r\n product, created = Products.objects.get_or_create(\r\n name=str(name),\r\n category=category,\r\n nutriscore=nutriscore,\r\n link=link,\r\n image=image,\r\n nutrition_image=nutrition_image,\r\n )\r\n if created:\r\n product.save()\r\n print(product.name)\r\n\r\n except Products.DoesNotExist:\r\n raise CommandError(\"Products %s could not been reached\"\r\n % name)\r\n except IntegrityError:\r\n continue",
"def get_product_data_off(self):\n list_products_name = []\n for x in self.list_categories: \n \"\"\"get products' data from openfoodfacts api with string as paramaters\"\"\"\n parameters = {\n 'action': 'process',\n 'json': 1,\n 'countries': 'France',\n 'page_size': 100,\n 'page': 1,\n 'tagtype_0': 'categories',\n 'tag_contains_0': 'contains',\n 'tag_0': x\n }\n r = requests.get('https://fr.openfoodfacts.org/cgi/search.pl',\n params=parameters) # passing parameters in URL\n print(r.url)\n data = r.json() # r. from requests module decodes json file\n products = data['products'] #access dictionnary items by referring to its key name, products ordered by id\n list_products_name.append(products) \n self.list_products = list_products_name # list_categories_name is passed in the instance property",
"def call(self):\r\n clean_products = []\r\n\r\n for category in CATEGORIES:\r\n print(f\"Chargement des produits de type {category}\")\r\n api_url = SEARCH_API_URL + \\\r\n (f\"?search_terms={category}\"\r\n \"&search_tag=category&sort_by=unique_scans_n\"\r\n \"&page_size=1000&json=1\")\r\n json_response = requests.get(api_url).json()\r\n products = json_response[\"products\"]\r\n\r\n for product in products:\r\n clean_product = {\r\n k: v for k, v in product.items()\r\n if k in FIELD_NEEDED and v != ''}\r\n clean_products.append(clean_product)\r\n\r\n return clean_products",
"async def getCategories(self, department=None):\n payload = {}\n \n if department:\n payload[\"department\"] = department\n \n\n # Parameter validation\n schema = CatalogValidator.getCategories()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/categories\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[{\"in\":\"query\",\"name\":\"department\",\"description\":\"The name of the department. Use this parameter to filter products by a particular department. See below the list of available departments. You can retrieve available departments from the **v1.0/departments/** API\",\"schema\":{\"type\":\"string\",\"enum\":[\"baby-care-kids-essentials\",\"beauty-personal-care\",\"home-living\",\"kids\",\"men\",\"others\",\"toys\",\"women\"]},\"required\":false}],\"query\":[{\"in\":\"query\",\"name\":\"department\",\"description\":\"The name of the department. Use this parameter to filter products by a particular department. See below the list of available departments. You can retrieve available departments from the **v1.0/departments/** API\",\"schema\":{\"type\":\"string\",\"enum\":[\"baby-care-kids-essentials\",\"beauty-personal-care\",\"home-living\",\"kids\",\"men\",\"others\",\"toys\",\"women\"]},\"required\":false}],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", department=department)\n query_string = await create_query_string(department=department)\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"get\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/categories\", department=department), query_string, headers, \"\", exclude_headers=exclude_headers), data=\"\")",
"def retrieve(self, request, pk=None):\n try:\n category = Category.objects.get(pk=pk)\n serializer = CategorySerializer(category, context={'request': request})\n return Response(serializer.data)\n except Category.DoesNotExist:\n return Response({'message' : \"***BUZZER NOISE***, doesn't exist, try again\"},\n status=status.HTTP_404_NOT_FOUND)"
] | [
"0.71682376",
"0.7032157",
"0.69441664",
"0.6676812",
"0.66664386",
"0.6588189",
"0.6565913",
"0.64754415",
"0.6378403",
"0.6333164",
"0.6246395",
"0.6242302",
"0.6219372",
"0.6217133",
"0.6207262",
"0.6182406",
"0.6178808",
"0.61548734",
"0.61504626",
"0.6134846",
"0.60963714",
"0.60862243",
"0.6076432",
"0.60694516",
"0.6050827",
"0.6031401",
"0.6014498",
"0.60077184",
"0.5997869",
"0.59955186"
] | 0.7259174 | 0 |
returns True if event ends a possession, False otherwise | def is_possession_ending_event(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def count_as_possession(self):\n if self.is_possession_ending_event:\n if self.seconds_remaining > 2:\n return True\n # check when previous possession ended\n prev_event = self.previous_event\n while prev_event is not None and not prev_event.is_possession_ending_event:\n prev_event = prev_event.previous_event\n if prev_event is None or prev_event.seconds_remaining > 2:\n return True\n # possession starts in final 2 seconds\n # return True if there is a FT or FGM between now and end of period\n next_event = prev_event.next_event\n while next_event is not None:\n if isinstance(next_event, FreeThrow) or (\n isinstance(next_event, FieldGoal) and next_event.is_made\n ):\n return True\n next_event = next_event.next_event\n return False",
"def episode_end(self):\n return self.game.is_episode_finished()",
"def ended(self):\n return self.dur <= 0",
"def verify_ending(self):\n self._fast_forward_to_penultimate_play()\n if self.game_status.game_over:\n # Game shouldn't be over quite yet!\n self.reset()\n return False\n\n self.apply_next_event()\n game_over = self.game_status.game_over\n excess_outs = self.game_status.excess_outs\n self.reset()\n return game_over and not excess_outs",
"def has_ended(self):\r\n if self.end is None:\r\n return False\r\n\r\n return datetime.now(UTC()) > self.end",
"def end(event: EventType, widget: WidgetType) -> bool:\n return event.key == _locals.K_END",
"def is_done(self):\n return True if self.t >= self.max_ep_len else False",
"def event_processing_finished(self):\n if self.status in ACTIVE_STATES:\n return False # tally of events is only available at end of run\n try:\n event_qs = self.get_event_queryset()\n except NotImplementedError:\n return True # Model without events, such as WFJT\n return self.emitted_events == event_qs.count()",
"def is_endgame_state(self) :\n raise NotImplementedError",
"def sessionEnded(self):\r\n if self.sessionStarted == True: \r\n self.sessionCompleted = True",
"def is_call_ended(self) -> bool:",
"def is_end_game(self):\n win = self.is_game_won()\n tie = self.game_is_tied()\n return win or tie",
"def has_end(self):\n return bool(self._end)",
"def game_end(self):\n win, winner = self.has_a_winner()\n if win:\n return True, winner\n elif not len(self.availables): #\n return True, -1\n\n return False, -1",
"def has_happened(self):\n\n return self.end < timezone.now()",
"def EndOfPacket(self) -> bool:",
"def isOpen(self):\n\t\treturn not self.endgame",
"def check_end_game(self):\n return False if (any(self.p1_pits()) and any(self.p2_pits())) else True",
"def game_end(self):\n win, winner = self.has_a_winner()\n if win:\n return True, winner\n elif not len(self.availables):\n return True, -1\n return False, -1",
"def game_end(self):\n win, winner = self.has_a_winner()\n if win:\n return True, winner\n elif not len(self.availables):\n return True, -1\n return False, -1",
"def _is_end(self, symbol):\n if symbol.id == self.scanner.END_ID:\n return True\n else:\n return False",
"def _is_finish(self, pos):\r\n return self.course[pos[0], pos[1]] == 2",
"def session_finished(self):\n return bool(self.shuttingdown and not self._active_nodes)",
"def check_end(self):\n return [self.x, self.y] == self.end_pos",
"def isDone(self, game):\n from pygame.locals import K_ESCAPE, QUIT \n if game.keystate[K_ESCAPE] or pygame.event.peek(QUIT):\n return True, False \n else:\n return False, None",
"def is_happening(self):\n now = timezone.now()\n start = self.start\n end = self.end\n happening = False\n # check that the event has started and 'now' is btwn start & end:\n if (now >= start) and (start.time() <= now.time() <= end.time()):\n happening = True\n return happening",
"def tellIfEnded(self):\n self.congratulate()",
"def _isclose(self):\n return self.dp.state()==PyTango.DevState.CLOSE",
"def is_end_game(state):\n if YoteRules.is_boring(state) or YoteRules.is_player_stuck(state, state.get_next_player()):\n return True\n latest_player_score = state.score[state.get_latest_player()]\n if latest_player_score >= MAX_SCORE:\n return True\n return False",
"def get_is_on(self, event: Event | None) -> bool:\n if event is None:\n return False\n\n now = dt_util.utcnow()\n value = now > event.start\n if value and event.end is not None and now > event.end:\n value = False\n\n return value"
] | [
"0.75881785",
"0.702573",
"0.7022446",
"0.6956652",
"0.6942744",
"0.68245476",
"0.66753006",
"0.6652011",
"0.6642587",
"0.65541005",
"0.6489061",
"0.64834446",
"0.64689803",
"0.6468721",
"0.64471143",
"0.6441375",
"0.64016587",
"0.63684183",
"0.63627636",
"0.63627636",
"0.63463587",
"0.63345134",
"0.6326823",
"0.62915725",
"0.62703854",
"0.6261182",
"0.6244961",
"0.62377995",
"0.6223622",
"0.6213036"
] | 0.85768574 | 0 |
returns list of dicts with all stats for event | def event_stats(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def event_stats(self):\n return self.base_stats",
"def get_metrics(event):\n return tba_session.get(BASE_URL + '/event/%s/oprs' %event).json()",
"def event_dicts(self):\n events = []\n # We're assuming that the table has alternating rows that\n # containg (date, event title) possibly followed by (<empty>,\n # event details).\n selector = '#ae-billing-logs-table > tbody > tr'\n for (date_elt, event_elt) in self.doc.cssselect(selector):\n if date_elt.text is not None:\n events.append({\n # <td>EVENT DATE</td>\n 'date': date_elt.text.strip(),\n # <td><span id=\"...\">EVENT TITLE</span></td>\n 'title': event_elt.findtext('span').strip()\n })\n else:\n # An empty first column indicates details for the\n # preceeding event.\n assert len(events) > 0, len(events)\n last_event = events[-1]\n if last_event['title'].startswith('Usage Report '):\n last_event['details'] = self._usage_report_dict(event_elt)\n return events",
"def get_all_stat(self):\n all_stat=dict()\n for stat_type in self.log_book.keys():\n stat = self.get_stat(stat_type)\n all_stat[stat_type] = stat\n return all_stat",
"def get_stats(self):\n return {\n \"pings_sent\" : self.ping_count,\n \"measurements\" : self.measurements,\n }",
"def summarized_events(self):\n return self._summarized_events",
"def get_stats(self): \n return dict(l.split('\\t', 1) \\\n for l in wait(self.proto.stat()).splitlines() if l)",
"def get_all_stats(self) -> Dict[str, Any]:\n return self.http.get(self.config.paths.stat)",
"def stats(self):\r\n return {}",
"def get_stats(self):\n return utils.csv_to_dict(wait(self.proto.stat()))",
"def event_processor(self, _events):\r\n\r\n event_list = []\r\n #iter a list of event names\r\n for event_name in _events:\r\n counter = {}\r\n try:\r\n #filter events in Events table by event name\r\n events_queryset = Event.objects.filter(event__name=event_name)\r\n counter['event'] = event_name\r\n counter['count'] = 0\r\n\r\n #django querysets is better to count with native counter instead of len()\r\n if events_queryset.count() > 0:\r\n\r\n #iter the objects instances and increase the counter\r\n for query in events_queryset:\r\n counter['count'] = counter['count'] + query.counter\r\n\r\n \r\n event_list.append([counter])\r\n\r\n except Exception as EventFilterException:\r\n print(EventFilterException)\r\n \r\n return event_list",
"def aggregated_results(self, limit=2000) -> List[dict]:\n stored_events = []\n for events in self._iter_events():\n stored_events.extend(events)\n if len(stored_events) >= limit:\n return stored_events[:limit]\n return stored_events",
"def stats():\r\n times_lst = []\r\n time_dict = {}\r\n for album, details in dbase().items():\r\n time_m = 0\r\n time_s = 0\r\n for songs, details_s in details[0].items():\r\n time = details_s[1].split(\":\")\r\n min = int(time[0])\r\n sec = int(time[1])\r\n time_m += min\r\n time_s += sec\r\n time_s = datetime.timedelta(seconds=time_s)\r\n time_m = datetime.timedelta(seconds=time_m)\r\n time = time_m + time_s\r\n time = str(time)\r\n times_lst.append(time)\r\n time_dict[album] = time\r\n\r\n time_dict = sorted(time_dict.items(), key=lambda x: x[1], reverse=True)\r\n return time_dict",
"def stats(self) -> Dict:\n return self._stats",
"def events(self):\n return get_tsv(self.path, self.values, 'events.tsv')",
"def get_stats(self) -> Dict[str, Any]:\r\n stats = {}\r\n for attr in [attr for attr in self.__dict__ if attr not in Stats.PRINT_IGNORES]:\r\n stats[attr] = self.get_stat(attr)\r\n stats[\"level\"] = self.level\r\n return stats",
"def get_stats(evts_perigee) -> Table:\n rows = []\n\n for evt in reversed(evts_perigee):\n rows.append(evt.info)\n\n out = Table(rows=rows)\n return out",
"def get_event_list(self):\n pass",
"def get_stats(self, datalist, state):\n data = {}\n for i, col in zip(range(5), datalist[0].items()):\n [diff, per_diff, tot_percentage] = self.get_diff_and_percentage(datalist[0].iloc[i], datalist[1].iloc[i],\n state)\n data[col[0]] = {\"value\": datalist[0].iloc[i], \"change\": diff, \"change_per\": per_diff,\n \"tot_percentage\": tot_percentage}\n return data",
"def get_all(self):\r\n return list(pecan.request.storage_conn.get_event_types())",
"def get_sample_events(self): \n return self.sample_events[:]",
"def get_all_events(self):\n s = OrderedSet([self.EVENT_TOTAL_PROGRESS])\n s.update(self.get_events())\n return s",
"def get_all(self):\r\n ret = []\r\n for cache_name, stat in self.stats_per_cache.items():\r\n ret.append({\r\n 'cache_name': cache_name,\r\n 'num_hits': len(stat.hit_targets),\r\n 'num_misses': len(stat.miss_targets),\r\n 'hits': stat.hit_targets,\r\n 'misses': stat.miss_targets\r\n })\r\n return ret",
"def get_events_list(self):\n # REMARK: Maybe possible to optimize ?\n # - Not using a for loop ?\n # - Storing the ds to avoid reading all events when using the function a second time\n\n # Gather events in ds\n events_df = pd.DataFrame(columns=['name', 'date_start', 'date_end', 'duration', 'type_event', 'is_atypical'])\n for event in self.set_atypical_events:\n events_df = events_df.append(event.get_info(), ignore_index=True)\n\n # Sort ds according to date_start\n events_df = events_df.sort_values('date_start')\n events_df = events_df.reset_index(drop=True)\n\n return events_df",
"def get_events_batch() -> PayloadDictList:\n ...",
"def metrics(self):\n\n return six.iteritems(self.__stats_table)",
"def GetEventCountsSinceLastCall(self):\n event_map = {}\n self.lock.acquire()\n for event in self.events:\n event_map[event.name] = event.count\n event.count = 0\n self.lock.release()\n return event_map",
"def get_stats(self, phases=4):\n # TODO: Refactor to static variables plus updates. (signals?).\n\n scope = get_scope_by_name(self.scope)()\n stats = [0 for x in range(phases)] # [0, 0, 0, ...] empty result array\n\n for phase in range(phases): # for each phase\n start = scope.start\n end = scope.end\n stats[phase] = self.track_events.filter(created__date__range=(start, end)).count()\n scope = scope.previous\n\n return stats",
"def statistics(self):\n stats = {}\n fields = {\n 'Hit count:': ('hit_count', Value.from_number),\n 'Miss count:': ('miss_count', Value.from_number),\n 'Hit ratio:': ('hit_ratio', Value.from_percent),\n 'Item count:': ('item_count', Value.from_number),\n 'Total cache size:': ('total_cache_size', Value.from_number),\n 'Oldest item age:': ('oldest_item_age', Value.from_time_ago),\n }\n selector = '#ae-stats-table tr'\n for element in self.doc.cssselect(selector):\n children = list(element)\n assert len(children) == 2, [text(child) for child in children]\n if text(children[0]).strip() in fields:\n # skip rows with invalid or empty cells\n field_name, value_fn = fields[text(children[0]).strip()]\n stats[field_name] = value_fn(text(children[1]))\n # Ensure all fields were filled.\n assert len(stats) == len(fields), (fields.keys(), stats.keys())\n return stats",
"def getAll(self, event_name):\n raw_events = self._callEventGetAll(self._id, event_name)\n return [snippet_event.from_dict(msg) for msg in raw_events]"
] | [
"0.7214178",
"0.69517624",
"0.67323565",
"0.6659741",
"0.6565855",
"0.654559",
"0.65454817",
"0.6497064",
"0.64058197",
"0.6327785",
"0.6292388",
"0.62751824",
"0.62552947",
"0.62509114",
"0.6241405",
"0.62282646",
"0.6225516",
"0.62236094",
"0.62227386",
"0.62097263",
"0.6165822",
"0.61341",
"0.6130355",
"0.61083704",
"0.6093256",
"0.606873",
"0.60395586",
"0.60105634",
"0.59936297",
"0.5990692"
] | 0.7740182 | 0 |
returns team id for team on offense for event | def get_offense_team_id(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_team_id(team_name):\n\n team_name = team_name.lower()\n endpoint = \"/teams\"\n response = api.nhl_api(endpoint)\n\n if not response:\n raise ConnectionError(\"An invalid response was returned from the NHL Teams API.\")\n\n teams_json = response.json()\n teams = teams_json[\"teams\"]\n\n team_id = None\n for team in teams:\n if team[\"name\"].lower() == team_name:\n team_id = team[\"id\"]\n break\n\n if not team_id:\n raise ValueError(\"{} is not a valid NHL team. Check your configuraiton file!\".format(team_name))\n\n return team_id",
"def team_id(self):\n return self._team_id",
"def teamsatevent():\n if not is_request_valid(request):\n abort(400)\n \n text = request.form['text']\n \n event, _ = find_event_and_match(text)\n \n if event == None:\n return EVENT_NOT_FOUND\n \n # This gets all the teams at the event\n teams = requests.get(f'https://us-central1-pearadox-2020.cloudfunctions.net/GetSingleByTypeAndId/teams/{EVENT_IDS[event]}').json()\n return_str = ''\n for team in teams:\n name = teams[team]['team_name']\n location = teams[team]['team_loc']\n # Append the team name, number, and location to the response string\n return_str += f'\\n* {team}-{name} \\t{location}'\n \n return jsonify(\n response_type = 'in_channel',\n type='mrkdwn',\n text = return_str\n )\n \n # If there is no event, then event not found\n return EVENT_NOT_FOUND",
"def getTeamByEspn(self, name):\n # team, created = Teams.objects.get_or_create(name_espn=name, defaults={'country_id' : 1})\n # return int(team.id)\n\n try:\n team = Teams.objects.get(name_espn=name)\n return int(team.id)\n except:\n return 0",
"def get_team_id(self, team_name):\n\n teams = self.get_teams()\n for team in teams:\n if team['name'] == team_name:\n return team['id']\n\n return None",
"def winning_team(self):\n return self.team_id",
"def get_team_alliance(event: str, match: int, team: int) -> typing.Optional[str]:\n \n if team in get_match_alliances(event, match)['red']:\n return 'red'\n elif team in get_match_alliances(event, match)['blue']:\n return 'blue'\n else:\n return None",
"def get_team_id(self):\n try:\n return self.profile_data[\"proTeamId\"]\n except Exception as e:\n error_msg = (\"Failed to retrieve the player's team id: {}\"\n \"\".format(str(e)))\n raise PlayerDataException(error_msg)",
"def find_team(self):\n if self.team_id is not None:\n return ItopapiPrototype.get_itop_class('Team').find(self.team_id)\n return None",
"def get_team(self):\n if self.team:\n return self.team\n return None",
"def test_data_source_soaps_id_team_get(self):\n pass",
"def get_team(self, game_state):\n if self.red:\n return game_state.get_red_team_indices()\n else:\n return game_state.get_blue_team_indices()",
"def get_team(team):\n if team == \"left\":\n return \"0\"\n elif team == \"right\":\n return \"1\"\n elif team == \"spec\":\n return \"-1\"",
"def test_basketballteams_id_get(self):\n pass",
"def get_event_eid(eid):\n return EventModel.query.get_or_404(eid)",
"def test_workflows_id_team_get(self):\n pass",
"def get_geneva_investment_id(trade_info):\n\tif trade_info['SCTYID_ISIN'] != '':\n\t\treturn get_investment_Ids(trade_info['ACCT_ACNO'], 'ISIN', trade_info['SCTYID_ISIN'])[0]\n\telif trade_info['SCTYID_SMSEQ'] != '':\n\t\treturn get_investment_Ids(trade_info['ACCT_ACNO'], 'FT', trade_info['SCTYID_SMSEQ'])[0]\n\telse:\n\t\tlogger.error('get_geneva_investment_id(): no security identifier found.')\n\t\traise InvestmentIdNotFound()",
"def getTeam(self):\n return self.team",
"def get_teams():",
"def team(self):\n return self._team",
"def getOpposition(self, team):\n if team.lower() == self.homeTeam['name']:\n return self.awayTeam['name']\n elif team.lower() == self.awayTeam['name']:\n return self.homeTeam['name']\n else:\n return None",
"def team_name(self, team_name):\n self.team_id = self.get_team_id(team_name)\n LOG.debug(\"Mattermost team id: %s\", self.team_id)",
"def test_get_one_for_team(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n teammate = User.create(name='teammate', email='[email protected]',\n owned_teams=[team.uid])\n teammate.put()\n user = User.create(name='foo', email='[email protected]',\n owned_teams=[team.uid])\n user.put()\n response = self.testapp.get(\n '/api/teams/{}/users/{}'.format(team.uid, teammate.uid),\n headers=self.login_headers(user),\n )\n response_dict = json.loads(response.body)\n self.assertEqual(response_dict['uid'], teammate.uid)",
"def get_game(self, game_id):\n \n session = requests.session()\n response = session.get(self.baseURL + str(game_id), headers=self.headers)\n soup = BeautifulSoup(response.text)\n \n #get teams\n defeated_by = False \n game_header = soup.find_all(text=re.compile('defeats'))\n \n if len(game_header) == 0:\n game_header = soup.find_all(text=re.compile('defeated by'))\n \n if (len(game_header)) == 0:\n game_header = soup.find_all(text=re.compile('defeat'))\n \n if (len(game_header)) == 0:\n game_header = soup.find_all(text=re.compile('drew'))\n defeated_by = True \n else:\n defeated_by = True \n\n if defeated_by: \n teams = self.remove_long_names(game_header[1]).replace('\\n', '')\n home_team = teams.split(' ')[0]\n away_team = teams.split(' ')[3]\n else:\n teams = self.remove_long_names(game_header[1]).replace('\\n', '')\n home_team = teams.split(' ')[0]\n away_team = teams.split(' ')[2]\n \n date_string = game_header[0].split(' ')\n date_string_find = [date.lower() for date in date_string]\n \n venue = date_string[date_string_find.index('at') + 1]\n \n #get round\n round_num = None\n \n try:\n date_string_find.remove('')\n except:\n pass\n \n try:\n round_num = int(date_string[date_string_find.index('round') + 1])\n except:\n try:\n round_num = date_string_find[date_string_find.index('final') - 1] + ' final'\n except:\n round_num = date_string_find[date_string_find.index('semi-final')]\n \n date = date_string[-3:]\n date = ' '.join(date) \n date = parser.parse(date)\n \n #get attendance\n attend = soup.find_all(text=re.compile('Attendance'))\n attendance = 0\n \n if (len(attend) > 3):\n attendance = int(attend[1].split(' ')[-1])\n \n #get stats \n away_stats = {}\n home_stats = {}\n \n for stat in stats:\n stat_row = soup.find_all('td', text=stat)[0].find_parent('tr')\n elements = stat_row.find_all('td')\n \n if elements[0].text == '-':\n home_stats[stat] = None\n else:\n home_stats[stat] = elements[0].text\n \n if elements[0].text == '-':\n away_stats[stat] = None\n else:\n away_stats[stat] = elements[2].text\n \n return Game(game_id, home_team, away_team, venue, round_num, date, attendance, home_stats, away_stats)",
"def get_tournament_id(self):\n User = Query()\n serialized_tournament = self.serialize_tournament()\n documents = table_tournaments.search(User.tournament_name == str(serialized_tournament['tournament_name'])\n and User.start_date == str(serialized_tournament['start_date']))\n id_tournament = None\n for document in documents:\n id_tournament = document.doc_id\n return id_tournament",
"def get_teams_attending_two_events(event1, event2):\n teams_at_both = []\n teams1 = get_teams(event1)\n teams2 = get_teams(event2)\n for team1 in teams1:\n if team1 in teams2:\n teams_at_both.append(team1)\n return teams_at_both",
"def get_teams_at_match(event: str, match: int) -> typing.Optional[typing.List[int]]:\n \n match_data = get_match_data(event, match)\n if match_data == None:\n return None\n return [int(x.split('-')[1]) for x in match_data]",
"def _eidnamelookup(eid):\n\n url = b64decode('aHR0cDovL20uZXNwbi5nby5jb20vbmZsL3BsYXllcmluZm8/cGxheWVySWQ9') + eid + '&wjb='\n req = urllib2.Request(url)\n r = urllib2.urlopen(req)\n html = r.read()\n try:\n soup = BeautifulSoup(html)\n team = soup.find('td', attrs={'class':'teamHeader'}).find('b')\n name = soup.find('div', attrs={'class':'sub bold'})\n return \"{0} {1}\".format(team.getText(), name.getText())\n except Exception, e:\n print \"ERROR: _eidnamelookup :: {0}\".format(e)\n return None",
"def getLeagueByEspn(self, name):\n\n league, created = Leagues.objects.get_or_create(name_espn=name)\n return int(league.id)",
"def getPlugEventId(self, pid, ename):\n for event in self._events.values():\n if event.name == ename and event.pid == pid: \n return event.ID\n return None"
] | [
"0.6402092",
"0.63786477",
"0.6285374",
"0.62278175",
"0.6203459",
"0.617215",
"0.5979764",
"0.5927196",
"0.573476",
"0.57201517",
"0.56839955",
"0.5652449",
"0.5618322",
"0.5600507",
"0.5562163",
"0.5533258",
"0.55129105",
"0.5488184",
"0.5462104",
"0.54099923",
"0.53854465",
"0.5382094",
"0.5345744",
"0.5304716",
"0.5298862",
"0.5275818",
"0.52631134",
"0.52600366",
"0.52594215",
"0.52588314"
] | 0.82629144 | 0 |
returns list of all events that take place at the same time as the current event | def get_all_events_at_current_time(self):
events = [self]
# going backwards
event = self
while event is not None and self.seconds_remaining == event.seconds_remaining:
if event != self:
events.append(event)
event = event.previous_event
# going forwards
event = self
while event is not None and self.seconds_remaining == event.seconds_remaining:
if event != self:
events.append(event)
event = event.next_event
return sorted(events, key=lambda k: k.order) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_curr_events(self):\n today = datetime.date.today()\n return self.s.query(Event).filter(Event.time > today).all()",
"def events(time):\n\n event_list = eventlist()\n idx = np.all(time == event_list[:, 0:len(time)], axis=1)\n return event_list[idx,:]",
"def get_events(self):\n ret = []\n while True:\n event = self.event.get_event(wait=1, full=True)\n if event is None:\n return ret\n ret.append(event)",
"def nextEvents(self):\n # sort based on timestamp\n self.event_q.sort(key=lambda evt: evt.timestamp)\n\n # there may be events with same timestamp\n events = []\n earliest_ts = self.event_q[0].timestamp\n while len(self.event_q) > 0:\n if self.event_q[0].timestamp == earliest_ts:\n evt = self.event_q.pop(0)\n events.append(evt)\n else:\n break\n return events",
"def get_events() -> list[Event]:\n g.ledger.changed()\n return [e for e in g.filtered.entries if isinstance(e, Event)]",
"def events(self):\n return self.current_events",
"def get_events(self):\n self._events = []\n self.ircobj.process_once(timeout=0.1)\n return self._events",
"def event_list(self):\n return self._event_list",
"def get_event_list(self):\n pass",
"def get_timed_events(self):\n return self.dispatcher.timed_events",
"def eventList(self):\n return self._eventList",
"def upcoming(\n self,\n event_filter: str = \"\"\n ) -> List[sched.Event]:\n\n return [\n event for event in self.scheduler.queue\n if event.argument[0] == event_filter\n or not event_filter\n ]",
"def get_times(self):\n times = []\n for i in range(1, len(self.events)):\n times.append(self.events[i-1].elapsed_time(self.events[i]))\n return times",
"def get_events(self):\n return self.events",
"async def events(self) -> Iterable[Event]:",
"def get_events(self):\n return self.s.query(Event).all()",
"def expired_alarm():\n temp_events = Events_list.copy()\n for x in range(len(temp_events)):#iterates for the whole events list\n if time.time() >= convert_to_epoch(temp_events[x][1]):#if the time set is less than current time it must be expired\n event_remove(temp_events[x][0])",
"def get_events(self):\n events = []\n for device in self:\n events.extend(self[device].get_events())\n return events",
"def events(self):\n return self.search(comp_class=Event)",
"def getListOfEvents(self):\n return self.model.getListOfEvents()",
"def walk(self) -> [(str, bool, any)]:\n t = time()\n return [(k, (t > v[0]), v[1]) for k, v in self.timers.items()]",
"def get_sample_events(self): \n return self.sample_events[:]",
"def list_events():\n return [\n snow,\n mosquito,\n sun_heat,\n orage,\n overflowing,\n gathering,\n trampling,\n pollution,\n southern_wind,\n northern_wind,\n fog,\n sun\n ]",
"def get_event_list(self):\n event_list = []\n eventLocation = -1\n for element in self:\n eventLocation += 1\n if element.isChunk():\n event = element.embedded_event()\n if event:\n event_list.append((eventLocation, event.eid))\n return event_list",
"def events(self):\n return self._events",
"def get_all_debug_events() -> Event:\n return Event.objects.filter(level__contains=\"debug\")",
"def collect_new_events(self) -> list:\n self.logger.debug('Collecting new events...')\n events = self.build_events()\n if not events:\n self.logger.debug('No new events.')\n for event in events:\n self.logger.info('A new event has been detected: {}'.format(event))\n self._buffer_buisy_mutex.acquire()\n self._events_buffer.append(event)\n self._buffer_buisy_mutex.release()",
"def build_events(self) -> list:\n raise NotImplementedError()",
"def available_events(self):\n return self.target.read_value(self.available_events_file).splitlines()",
"def list_events(self, name):\n return self._get_events(name)"
] | [
"0.7656709",
"0.7239735",
"0.670789",
"0.66426444",
"0.65620106",
"0.65207577",
"0.64506966",
"0.63653976",
"0.63593066",
"0.6298316",
"0.6236205",
"0.6210293",
"0.6126111",
"0.6072726",
"0.60209477",
"0.59940344",
"0.5987256",
"0.59750044",
"0.5967468",
"0.59477067",
"0.5942667",
"0.59343284",
"0.5933212",
"0.5931283",
"0.59030044",
"0.58991945",
"0.5883167",
"0.58717966",
"0.5871275",
"0.5852835"
] | 0.7872362 | 0 |
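A minimal, self-contained sketch of the traversal described in the record above (not part of the dataset): it assumes a hypothetical doubly linked Event node with previous_event, next_event, seconds_remaining and order attributes, and it starts each walk one node away from self instead of re-checking self, which is equivalent to the guarded loops in the original document.

class Event:
    def __init__(self, order, seconds_remaining):
        self.order = order
        self.seconds_remaining = seconds_remaining
        self.previous_event = None
        self.next_event = None

    def get_all_events_at_current_time(self):
        events = [self]
        # walk backwards while the clock reading still matches
        event = self.previous_event
        while event is not None and event.seconds_remaining == self.seconds_remaining:
            events.append(event)
            event = event.previous_event
        # walk forwards while the clock reading still matches
        event = self.next_event
        while event is not None and event.seconds_remaining == self.seconds_remaining:
            events.append(event)
            event = event.next_event
        return sorted(events, key=lambda e: e.order)

# usage: three events share the same clock reading (300s); the last one is later (295s)
a, b, c, d = Event(1, 300), Event(2, 300), Event(3, 300), Event(4, 295)
for left, right in zip([a, b, c], [b, c, d]):
    left.next_event, right.previous_event = right, left
assert [e.order for e in b.get_all_events_at_current_time()] == [1, 2, 3]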
returns dict with list of player ids for each team with players on the floor for current event. For all non-substitution events current players are just the same as previous event | def current_players(self):
return self.previous_event.current_players | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_players_on_floor(self):\n for period in self.Periods:\n # set current players to be period starters\n current_players = period.Starters.copy()\n for pbp_event in period.Events:\n if pbp_event.is_substitution():\n coming_in = pbp_event.player2_id\n going_out = pbp_event.player_id\n team_id = pbp_event.team_id\n current_players[team_id] = [coming_in if player == going_out else player for player in current_players[team_id]]\n pbp_event.current_players = current_players.copy()",
"def get_past_matches_data(team):\n matches = team.get_past_matches()\n match_list = []\n for match in matches:\n match_dict = {}\n match_dict['match_date'] = match.match_date\n match_dict['match_name'] = match.__str__()\n match_dict['id'] = match.id\n innings = match.get_innings()\n if len(innings):\n if innings[0].runs > innings[1].runs:\n match_dict['winner_team'] = innings[0].bat_team\n match_dict['win_margin'] = innings[0].runs - innings[1].runs\n match_dict['win_type'] = 'Runs'\n match_dict['winner_score'] = str(innings[0].runs) + '/' + str(innings[0].wickets)\n else:\n match_dict['winner_team'] = innings[1].bat_team\n match_dict['win_margin'] = 10 - innings[1].wickets\n match_dict['win_type'] = 'Wickets'\n match_dict['winner_score'] = str(innings[1].runs) + '/' + str(innings[1].wickets)\n match_list.append(match_dict)\n return match_list",
"def lineup_ids(self):\n lineup_ids = {}\n for team_id, team_players in self.current_players.items():\n players = [str(player_id) for player_id in team_players]\n sorted_player_ids = sorted(players)\n lineup_id = \"-\".join(sorted_player_ids)\n lineup_ids[team_id] = lineup_id\n return lineup_ids",
"def get_player_states(self, players):\n for player in self.players.values():\n p = players.add()\n p.id = player.id\n p.pos.CopyFrom(player.pos.to_protocol())",
"def get_starter_map(self, draft_group_players):\n self.starter_map = {}\n now = timezone.now()\n for p in self.draft_group_players:\n # print( str(now), ' >= ', str(p.start))\n if now >= p.start:\n self.starter_map[p.salary_player.player_id] = p.salary_player.player_id\n else:\n self.starter_map[p.salary_player.player_id] = self.PLAYER_NOT_STARTED\n return self.starter_map",
"def match_with_player(self, name, player_cal):\n updated_team_cal = self.team_cal.copy()\n filled_team_keys = []\n\n for loc in player_cal.stack().index:\n current_player_count = self.team_cal.at[loc]\n if self.price_cal.at[loc] <= player_cal.at[loc]:\n if current_player_count < self.team_size * 2:\n updated_team_cal.at[loc] += 1\n self.team_dict[f'{loc[1]}-{loc[0]}'].append(name)\n if current_player_count == self.team_size * 2 - 1:\n filled_team_keys.append(f'{loc[1]}-{loc[0]}')\n else:\n continue # team is filled\n\n self.team_cal = updated_team_cal\n return filled_team_keys",
"def teammates_player_ids(self):\n return [p.player_id for p in self.teammates]",
"def get_all_players():\n players = {}\n\n for char in list(string.ascii_uppercase):\n req = requests.get(\n 'http://www.euroleague.net/competition/players?listtype=alltime&letter=' + char\n )\n\n soup = BeautifulSoup(req.text, 'html5lib')\n\n mydivs = soup.findAll('div', {'class': 'items-list'})\n\n for div in mydivs:\n itemdivs = soup.findAll('div', {'class': 'item'})\n\n\n for div in itemdivs:\n links = div.findAll('a')\n for index, link in enumerate(links):\n if index % 2 == 0:\n player = link.text.replace(',', '').strip()\n link['href'] = link['href'].replace('?', '')\n result = re.findall(\n '/competition/players/showplayerpcode=(.*)&seasoncode=', link['href']\n )\n code = result[0]\n players[code] = player\n \n return players",
"def players(self) -> dict[int, Player]:\n return self._players",
"def create_players_id_dict(self) -> list:\n players_id = []\n self.show_players()\n print(\"\\n\" + \"Enter id of wanted players : \")\n while len(players_id) < 8:\n while True:\n id_choice = check.request_id(PLAYERS)\n if check.check_not_same_value(players_id, id_choice) is True:\n players_id.append(id_choice)\n break\n return players_id",
"def _player_list(self):\n game = self.ctrl.game\n return game.players[self.i_to_player_id(0)], game.players[self.i_to_player_id(1)]",
"def own_games(self):\r\n return sorted(self.games + self.finals, key=lambda g: (g.datetime, g.pitch.rank))",
"def __get_player_occurrences(all_players: List[Player], face_encodings: List) -> Dict[Player, Set[int]]:\n occurrences = dict()\n for player in all_players:\n occurrences[player] = set()\n\n video_capture = cv2.VideoCapture(EPISODE_VIDEO_LOCATION)\n length = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))\n frame_index = 0\n while video_capture.isOpened():\n continues, frame = video_capture.read()\n if not continues: # If this is the last frame then stop\n video_capture.release()\n break\n frame_index += 1\n if frame_index % VideoParser.PROGRESS_FRAMES_FREQUENCY_PRINT == 0: # Prints the progress\n print('{}/{}'.format(frame_index, length))\n if frame_index % FRAME_SKIP == 0: # Only analyse every FRAME_SKIP frame\n player_occurrence = VideoParser.__get_player_occurrence_in_frame(frame, all_players, face_encodings)\n for player in player_occurrence:\n occurrences[player] = occurrences[player].union({frame_index})\n\n return occurrences",
"def get_players():\n return {\"X\": play_human, \"O\": play_ai}",
"def _get_projected_data(self, season):\n try:\n match_data = []\n player_id = self.profile_data[\"id\"]\n matches = season.get_season_data()[\"stats\"][\"projectedPlayerStats\"]\n for match in matches:\n if player_id == match[0]:\n match_data.append(match)\n return match_data\n except Exception as e:\n error_msg = (\"Failed to retrieve player match data: {}\"\n \"\".format(str(e)))\n raise PlayerDataException(error_msg)",
"def add_players(game: LolGame, players: List[dict], add_page_id: bool = False) -> LolGame:\n\n for team_side in game[\"teams\"]:\n team_side_leaguepedia = \"1\" if team_side == \"BLUE\" else \"2\"\n\n for idx, game_player in enumerate(game[\"teams\"][team_side][\"players\"]):\n try:\n # We get the player object from the Leaguepedia players list\n player_latest_data = next(\n p\n for p in players\n if p[\"Side\"] == team_side_leaguepedia\n and lit.get_id(p[\"Champion\"], object_type=\"champion\") == game_player[\"championId\"]\n )\n\n game_player[\"role\"] = role_translation[player_latest_data[\"gameRoleNumber\"]]\n\n unique_identifiers = LeaguepediaPlayerIdentifier(\n name=player_latest_data.get(\"currentGameName\"),\n irlName=player_latest_data.get(\"irlName\"),\n country=player_latest_data.get(\"Country\"),\n residency=player_latest_data.get(\"Residency\"),\n age=player_latest_data.get(\"Age\"),\n role=player_latest_data.get(\"Role\"),\n team=player_latest_data.get(\"Team\"),\n kills=player_latest_data.get(\"Kills\"),\n deaths=player_latest_data.get(\"Deaths\"),\n assists=player_latest_data.get(\"Assists\"),\n ss=player_latest_data.get(\"SummonerSpells\"),\n gold=player_latest_data.get(\"Gold\"),\n cs=player_latest_data.get(\"CS\"),\n items=player_latest_data.get(\"Items\"),\n trinket=player_latest_data.get(\"Trinket\"),\n keystoneMastery=player_latest_data.get(\"KeystoneMastery\"),\n keystoneRune=player_latest_data.get(\"KeystoneRune\"),\n runes=player_latest_data.get(\"Runes\"),\n )\n\n if add_page_id:\n unique_identifiers[\"pageId\"] = int(player_latest_data[\"pageId\"])\n\n game_player[\"uniqueIdentifiers\"] = {\"leaguepedia\": unique_identifiers}\n\n except StopIteration:\n # Since we cannot get the role properly, we try to infer it\n game_player[\"role\"] = list(role_translation.values())[idx]\n\n return game",
"def playerStandings():\n # place all players in a dictionary\n player_dict = {}\n conn, c = connect()\n c.execute(\"\"\"SELECT * FROM players;\"\"\")\n for row in c.fetchall():\n player_dict[row[0]] = [row[1], 0, 0]\n\n # count the number of win and matches in for all matches\n c.execute(\"\"\"SELECT winner, loser FROM matches;\"\"\")\n for row in c.fetchall():\n if row[0] in player_dict:\n player_dict[row[0]][1] += 1\n player_dict[row[0]][2] += 1\n if row[1] in player_dict:\n player_dict[row[1]][2] += 1\n\n # compile win counts as the key to dictionary\n win_count = {}\n for i in player_dict:\n wins = player_dict[i][1]\n if wins in win_count:\n win_count[wins].append((i, player_dict[i][0],\n wins, player_dict[i][2]))\n else:\n win_count[wins] = [(i, player_dict[i][0],\n wins, player_dict[i][2])]\n\n # compile output list\n output_list = []\n for i in sorted(win_count.keys(), reverse=True):\n for j in win_count[i]:\n output_list.append(j)\n\n return output_list",
"def _load_player_map(self) -> None:\n # Loading people that have had ab appearance in the year specified\n # This might not be general enough as some players get paid even if they don't play\n sql = \"\"\"\\\n select p.playerid, p.namefirst, p.namelast, p.namegiven, a.team_id\n from people p\n INNER JOIN appearances a ON p.playerid = a.playerid and a.yearid = %s\n \"\"\"\n\n self._cursor.execute(sql, (self._yearid,))\n duplicates = 0\n all_players = self._cursor.fetchall()\n for player in all_players:\n r = {'playerid': player[0], 'namefirst': player[1], 'namelast': player[2],\n 'namegiven': player[3], 'team_id': player[4]}\n\n # Build a key from namefirst, namelast and team_id, then remove all spaces\n # Make sure we don't already have the player loaded, count and report duplicates.\n key = \"{}{}{}\".format(player[1], player[2], player[4]).replace(\" \", \"\")\n if self._player_map.get(key) is None:\n self._player_map[key] = r\n else:\n duplicates += 1\n\n # We'll add the player again using his given first name if different from namefirst\n given_first = player[3].split()[0]\n if given_first != player[1]:\n key2 = \"{}{}{}\".format(given_first, player[2], player[4]).replace(\" \", \"\")\n if self._player_map.get(key2) is None:\n self._player_map[key2] = r\n else:\n duplicates += 1\n\n if duplicates > 0:\n raise RuntimeError(\"Duplicates found building player map: \" + str(duplicates))",
"def get_player_stats_from_game(team, year, week):",
"def players(game_id):\n # get data\n data = mlbgame.data.get_players(game_id)\n # parse data\n parsed = etree.parse(data)\n root = parsed.getroot()\n\n output = {}\n output['game_id'] = game_id\n\n # get player/coach data\n for team in root.findall('team'):\n type = team.attrib['type'] + \"_team\"\n # the type is either home_team or away_team\n output[type] = {}\n output[type]['players'] = []\n output[type]['coaches'] = []\n\n for p in team.findall('player'):\n player = {}\n for key in p.keys():\n player[key] = p.get(key)\n output[type]['players'].append(player)\n\n for c in team.findall('coach'):\n coach = {}\n for key in c.keys():\n coach[key] = c.get(key)\n output[type]['coaches'].append(coach)\n\n # get umpire data\n output['umpires'] = []\n for u in root.find('umpires').findall('umpire'):\n umpire = {}\n for key in u.keys():\n umpire[key] = u.get(key)\n output['umpires'].append(umpire)\n\n return output",
"def get_player_data(soup, game_dict):\n\n # Loop through teams to store information by team.\n for i, team in enumerate([\"home\", \"away\"]):\n try:\n plyrs_soup = soup.findAll(\n \"div\", {\"class\": \"aufstellung_ausgabe_block {}side\".format(team)})[0]\n plyr_data = plyrs_soup.findAll(\"a\", {\"class\": \"spieler_linkurl\"})\n\n # Loop through players by team.\n for j, plyr in enumerate(plyr_data):\n try:\n game_dict[\"{}_plyr_{}\".format(team, j)] = plyr.text\n game_dict[\"{}_plyr_url_{}\".format(team, j)] = plyr[\"href\"]\n except AttributeError:\n pass\n except (AttributeError, IndexError):\n pass\n\n return game_dict",
"def get_ipl_player_to_users_mapping(teams=None):\n\n ipl_players = defaultdict(list)\n for user_id in USER_IDS:\n for player in get_squad_details(user_id)['players']:\n player_details = get_player(player)\n if teams and not player_details['team'] in teams:\n continue\n ipl_players[player_details.name].append(\n get_league_team_name_for_user(user_id))\n\n return ipl_players",
"def get_team_gk_ids(match_id):\n homeid, awayid, all = get_match_info(match_id)\n data = service_request(\"GetMatchSquad\", {\"matchId\": match_id})\n gks = {k: {\"team_id\": data.get(k).get(\"data\")[3],\n \"jersey_no\": data.get(k).get(\"data\")[1],\n \"player_id\": k}\n for k in data if data.get(k).get(\"data\")[4]==1 and data.get(k).get(\"data\")[2]==1}\n\n teams = {\n int(homeid): 0,\n int(awayid): 1\n }\n\n return {teams.get(gks.get(k).get(\"team_id\")):gks.get(k) for k in gks}",
"def get_info(self):\n players = self.player_set\n if self.order_by == 'rank':\n players = players.order_by('rank')\n else:\n players = players.order_by('-rating')\n\n # if players have None scores, move to the bottom\n none_players = []\n players = list(players)\n for player in players:\n if ((self.order_by == 'rank' and player.rank is None)\n or (self.order_by == 'rating' and player.rating is None)):\n none_players.append(player)\n players.remove(player)\n players.extend(none_players)\n\n return dict(\n players = players,\n players_json = json.dumps([\n {\n 'id': player.id,\n 'name': \"%d %s\" % (\n player.rank or len(players), player.name)\n }\n for player in players]),\n num_matches = self.match_set.count(),\n num_rounds = Round.objects.filter(match__company = self).count(),\n recent_matches = (self.match_set\n .order_by('-played_time')[:MATCH_RESULT_LIMIT]),\n api_account_id = self.get_api_account_id(),\n api_access_key = self.get_api_access_key()\n )",
"def _get_player_with_possession(frame):\n for t in frame['teams']:\n for p in t.get('players',[]):\n if p.get('possession'):\n return p\n return {}",
"def get_tourney_rounds(self, conference, year):\n ts_dict = self.get_tourney_slots()\n seed_dict = self.get_tourney_seeds()\n tr_dict = self.get_tourney_results()\n \n round_1 = list()\n round_2 = list()\n round_3 = list()\n round_4 = list()\n winner = list()\n \n round1_winners = list()\n for seed, team in seed_dict[year].items():\n for winning, losing in tr_dict[year]:\n if team == winning and conference in seed:\n round1_winners.append(seed[1:])\n #removes duplicates because I did this part weirdly... HEHEH\n round1_winners = list(set(round1_winners))\n\n win_counter = defaultdict(int)\n for seed, team in seed_dict[year].items(): \n for winning, losing in tr_dict[year]:\n if team == winning and conference in seed:\n win_counter[winning] += 1\n \n for slot, matchup in ts_dict[year].items():\n \n if conference in slot and \"R1\" in slot: \n round_1.append(\"{}-{}\".format(matchup[1:3], matchup[-2:]))\n round_1 = sorted(round_1)\n #for match in round_1:\n for winner1 in round1_winners:\n if winner1 in round_1[0]:\n for winner2 in round1_winners:\n if winner2 in round_1[-1]:\n round_2.append(\"{}-{}\".format(winner1, winner2))\n if winner1 in round_1[1]:\n for winner2 in round1_winners:\n if winner2 in round_1[-2]:\n round_2.append(\"{}-{}\".format(winner1, winner2))\n if winner1 in round_1[2]:\n for winner2 in round1_winners:\n if winner2 in round_1[-3]:\n round_2.append(\"{}-{}\".format(winner1, winner2))\n if winner1 in round_1[3]:\n for winner2 in round1_winners:\n if winner2 in round_1[-4]:\n round_2.append(\"{}-{}\".format(winner1, winner2))\n round_2 = sorted(round_2)\n\n round2_winners = list()\n for seed, team in seed_dict[year].items():\n for team2, count in win_counter.items():\n if team == team2 and count > 1:\n round2_winners.append(seed[1:])\n \n for winner1 in round2_winners:\n if winner1 in round_2[0]:\n for winner2 in round2_winners:\n if winner2 in round_2[-1]:\n round_3.append(\"{}-{}\".format(winner1, winner2))\n if winner1 in round_2[1]:\n for winner2 in round2_winners:\n if winner2 in round_2[-2]:\n round_3.append(\"{}-{}\".format(winner1, winner2))\n round_3 = sorted(round_3)\n\n round3_winners = list()\n for seed, team in seed_dict[year].items():\n for team2, count in win_counter.items():\n if team == team2 and count > 2:\n round3_winners.append(seed[1:])\n\n for winner1 in round3_winners:\n if winner1 in round_3[0]:\n for winner2 in round3_winners:\n if winner2 in round_3[-1]:\n round_4.append(\"{}-{}\".format(winner1, winner2))\n round_4 = sorted(round_4)\n\n for seed, team in seed_dict[year].items():\n for team2, count in win_counter.items():\n if team == team2 and count > 3:\n winner.append(seed[1:])\n\n conferences = {\"W\": \"East\", \"X\": \"Midwest\", \"Y\": \"South\", \"Z\": \"West\"}\n\n #print(\"CONFERENCE: {}, YEAR: {}\".format(conferences[conference], year))\n #print(\"ROUND1:\", round_1)\n #print(\"ROUND2:\", round_2)\n #print(\"ROUND3:\", round_3)\n #print(\"ROUND4:\", round_4)\n #print(\"WINNER:\", winner)\n\n #clearing out the tourney results dictionary\n #tr_dict.clear()\n\n return round_1, round_2, round_3, round_4, winner",
"def get_current_lineups():\n out = []\n pf = players[players[\"team\"].isin(top30Teams)]\n for index, row in pf.iterrows():\n # Make sure that we only use player data where a player is\n # playing for their current team\n if(row[\"name\"] in top30Obj[row[\"team\"]]):\n out.append(row)\n return pd.DataFrame(out)",
"def wins(self):\n return [g for g in self.games if g.winner is self.team]",
"def __map_player_id(self, seat): \n internal_player_id = None\n if seat:\n if seat == self.player_id:\n internal_player_id = self.COM_PLAYER_ID\n else:\n internal_player_id = self.OPPONENT_PLAYER_ID\n return internal_player_id",
"def determine_winners(self, players=None):\n players_and_cards = [(holding.player.id, holding.codes) for holding in self.live_holdings]\n if players:\n player_ids = [p.id for p in players]\n players_and_cards = [d for d in players_and_cards if d[0] in player_ids]\n winners = determine_winners(players_and_cards, self.board.codes)\n return [Player.query.get(winner) for winner in winners]"
] | [
"0.65528506",
"0.59010434",
"0.5883524",
"0.5855024",
"0.58492154",
"0.5839282",
"0.57566845",
"0.5750913",
"0.57267255",
"0.57229966",
"0.56700575",
"0.5654687",
"0.5652263",
"0.5650997",
"0.5623131",
"0.56151515",
"0.56052953",
"0.5571534",
"0.5563076",
"0.55602336",
"0.5559568",
"0.55582935",
"0.55519015",
"0.55307144",
"0.5511948",
"0.54912305",
"0.54855657",
"0.547786",
"0.5467498",
"0.54487306"
] | 0.6070743 | 1 |
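The record above only covers the default case (inherit the previous event's lineup). As an illustration of the substitution case handled elsewhere in play-by-play code of this kind, here is a hedged sketch with made-up team and player ids; the dict shape {team_id: [player_id, ...]} follows the query's description, and everything else (names, ids) is an assumption for the example.

def substitution_current_players(previous_players, team_id, going_out, coming_in):
    # previous_players: {team_id: [player_id, ...]} taken from the prior event
    players = {tid: list(ids) for tid, ids in previous_players.items()}
    # swap the outgoing player for the incoming one on the substituting team
    players[team_id] = [coming_in if pid == going_out else pid for pid in players[team_id]]
    return players

before = {10: [1, 2, 3, 4, 5], 20: [6, 7, 8, 9, 11]}
after = substitution_current_players(before, 10, going_out=3, coming_in=12)
assert after[10] == [1, 2, 12, 4, 5] and before[10] == [1, 2, 3, 4, 5]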
returns the score margin from perspective of offense team before the event took place | def score_margin(self):
if self.previous_event is None:
score = self.score
else:
score = self.previous_event.score
offense_team_id = self.get_offense_team_id()
offense_points = score[offense_team_id]
defense_points = 0
for team_id, points in score.items():
if team_id != offense_team_id:
defense_points = points
return offense_points - defense_points | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Margin(self):\n s = self.margin\n assert s in range(1,6), \"Margin score out of bounds.\"\n if s == 1: return 'Poor'\n elif s == 2: return 'Near Poor'\n elif s == 3: return 'Medium'\n elif s == 4: return 'Near Sharp'\n elif s == 5: return 'Sharp'",
"def margin(self, probs):\n\n abs_diff = np.abs(probs[:, 1] - probs[:, 0])\n return abs_diff",
"def partisan_att_reward(state, election_results, electoral_votes):\n evotes = int(electoral_votes[electoral_votes['state'] == state].evotes)\n dem_votes = int(election_results[(election_results['state'] == state) & (\n election_results['party'] == 'democrat')].votes)\n rep_votes = int(election_results[(election_results['state'] == state) & (\n election_results['party'] == 'republican')].votes)\n total_votes = dem_votes + rep_votes\n margin = (max(dem_votes, rep_votes) -\n min(dem_votes, rep_votes))/total_votes\n return evotes/(1+margin)",
"def getMargin(self):\n assert False",
"def margin_experimental(confidence, n_samples, data_point):\n return st.norm.ppf(1 - (1 - confidence) / 2) * math.sqrt(data_point * (1 - data_point) / n_samples) + 0.5 / n_samples + 0.005",
"def getScore(self):\n tempscore = 1000 - 0.01*self.timeDriving \n tempscore -= 0.1*getDist(self.maze.checkpoints[self.checkpoint].getMid(),self.pos)\n tempscore += self.checkpoint *1000\n tempscore += self.laps * 1000 * len(self.maze.checkpoints)\n return tempscore",
"def att_reward(state, election_results, electoral_votes):\n evotes = int(electoral_votes[electoral_votes['state'] == state].evotes)\n dem_votes = int(election_results[(election_results['state'] == state) & (\n election_results['party'] == 'democrat')].votes)\n rep_votes = int(election_results[(election_results['state'] == state) & (\n election_results['party'] == 'republican')].votes)\n total_votes = dem_votes + rep_votes\n margin = (max(dem_votes, rep_votes) -\n min(dem_votes, rep_votes))/total_votes\n return evotes * margin",
"def get_offset_value():\n # TODO rename it 'get_margin_value'\n # should be greater than 2 (maybe 1 is enough)\n return 5",
"def marginal_score(self,):\n score = 0\n visited = set()\n for edge in self.edges:\n if edge not in visited:\n visited.add(edge)\n visited.add(edge.reverse)\n if len(edge.cars) == 1:\n score += edge.original_distance\n return score",
"def margin(ranking, references):\n lowest_relevant, highest_irrelevant = 0, 0\n for k, prediction in enumerate(ranking, 1):\n if prediction not in references and highest_irrelevant is 0:\n highest_irrelevant = k\n if prediction in references and k > lowest_relevant:\n lowest_relevant = k\n return abs(lowest_relevant - highest_irrelevant)",
"def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player)\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player))\n if opp_moves == 0:\n return float(\"inf\")\n\n move_ratio = own_moves / opp_moves\n\n completeness = completeness_of_game(game)\n centerness_score = 0\n\n if completeness < 0.5:\n own_centerness = centerness(game, player)\n opp_centerness = centerness(game, game.get_opponent(player))\n centerness_ratio = own_centerness / opp_centerness + 0.1\n\n center_score = -1 * own_centerness + opp_centerness - centerness_ratio\n\n return 2 * own_moves - 2 * opp_moves + 2 * move_ratio + centerness_score",
"def custom_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player) / 8\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player)) / 8\n if opp_moves == 0:\n return float(\"inf\")\n\n move_ratio = (own_moves * 8) / (opp_moves * 8) / 8\n\n # Calculate centerness_score\n completeness = completeness_of_game(game)\n centerness_score = 0\n if completeness < 0.5:\n centerness_max = (game.width / 2.)**2 + (game.height / 2.)**2\n\n own_centerness = centerness(game, player) / centerness_max\n opp_centerness = centerness(game, game.get_opponent(player)) / centerness_max\n centerness_ratio = (own_centerness * centerness_max) / (centerness_max * opp_centerness + 0.1) / centerness_max\n\n centerness_score = -1 * own_centerness + opp_centerness - centerness_ratio\n\n return 2 * own_moves - 2 * opp_moves + 2 * move_ratio + centerness_score",
"def attention(self):\n center_of_attention = 0\n distance = 10000\n for person in self.peoples:\n if person is not None:\n if person.X < distance: #person's depth is now their X position in edwin frame\n center_of_attention = person.ID\n distance = person.X\n\n if center_of_attention != 0:\n return center_of_attention",
"def custom_score_3(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n if game.is_winner(player):\n return float(\"inf\")\n game_phase = len(game.get_blank_spaces()) # high if early, low if late\n\n # Heuristic tries to take advantage of the center and shadowing if possible, otherwise stick to the centre and maximise number of moves \n\n # (*0) Calculate the (theoretical) centre\n center = (game.width / 2., game.height / 2.)\n opponent = game.get_opponent(player)\n loc_player = game.get_player_location(player)\n loc_opponent = game.get_player_location(opponent)\n if game.width % 2 != 0 and game.height % 2 != 0:\n trueCentre = True\n loc_mirror = tuple(abs(x-(game.width-1)) for x in loc_player) # the mirrored location of the player across the axes\n else:\n trueCentre = False\n # (1) Always take the centre!\n if loc_player == center:\n return float(\"inf\")\n # (2) If opponent has the centre, avoid a position within knight's movement at all costs to avoid shadowing\n if loc_opponent == center:\n r, c = center\n directions = [(-2, -1), (-2, 1), (-1, -2), (-1, 2),(1, -2), (1, 2), (2, -1), (2, 1)]\n avoidable_positions = [(r + dr, c + dc) for dr, dc in directions]\n if loc_player in avoidable_positions:\n return float(\"-inf\")\n # (3) If we can shadow the opponent, we should!\n if trueCentre:\n if center not in game.get_blank_spaces() and loc_opponent == loc_mirror and len(game.get_legal_moves(player)) == len(game.get_legal_moves(opponent)):\n return float(\"inf\")\n # (4) Finally, we simply return number of moves active player can make minus number of moves opponent can make minus the distance from the centre, weighted by the game phase\n w, h = center\n y, x = loc_player\n dist = float((h - y)**2 + (w - x)**2)\n return (float(len(game.get_legal_moves(player)))-2.0*float(len(game.get_legal_moves(opponent)))-dist)*game_phase",
"def custom_score(game, player):\n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n if game.move_count < 15:\n return center_modified_score(game, player)\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)",
"def elution_score(self):\n return self.score",
"def disp_score():",
"def custom_score_3(game, player):\n \n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n player_legal_move_count, opponent_legal_move_count = \\\n len(player_legal_moves), len(opponent_legal_moves)\n move_count_difference = player_legal_move_count - opponent_legal_move_count\n # Find coordinates of center box\n h, w = get_center_coordinates(game)\n # Retrieve player's coordinates\n y, x = game.get_player_location(player)\n # Obtain coordinate further, closest to origin\n furthest_coord, closest_coord = max(h - y, w -x), min(h - y, w - x)\n # Return weighted, vector-valued length from origin / sum of weights\n weighted_distance_from_center = \\\n math.sqrt((closest_coord**2 + 2*(furthest_coord**2)))/3\n feature_vector = (move_count_difference, weighted_distance_from_center)\n \n weight_vector = (1.0,0.1)\n \n weighted_difference_dot_product = sum(p*q for p,q, \\\n in zip(weight_vector, feature_vector)) \n \n return float(weighted_difference_dot_product)",
"def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n # First obtain the base information to calculate player & opponent\n # feature values\n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n if len(player_legal_moves) != len(opponent_legal_moves):\n return float(len(player_legal_moves) - len(opponent_legal_moves))\n \n # Get_center_coordinates and opponent. Then set the list of participants\n center_coordinates = center_y, center_x = get_center_coordinates(game)\n participants = [player, opponent]\n \n # Then, for each participant obtain his/her feature values \n for participant in participants:\n if participant == player:\n p_legal_moves = player_legal_moves\n player_either = player\n participant_coordinates = p_y, p_x = \\\n game.get_player_location(participant)\n player_legal_move_count, player_start_center_distance, \\\n player_total_next_moves, player_max_path_length, \\\n player_path_count, player_min_center_diff \\\n = \\\n get_player_feature_values(game, center_coordinates, \\\n player_either,participant_coordinates, p_legal_moves)\n else:\n p_legal_moves = opponent_legal_moves\n player_either = opponent\n participant_coordinates = p_y, p_x \\\n = game.get_player_location(participant)\n opponent_legal_move_count, opponent_start_center_distance, \\\n opponent_total_next_moves, opponent_max_path_length, \\\n opponent_path_count, opponent_min_center_diff \\\n = \\\n get_player_feature_values(game, center_coordinates, \\\n player_either, participant_coordinates, p_legal_moves)\n \n # Place each participant's feature values in a tuple/vector surrogate \n pro_player_vector = \\\n (player_legal_move_count, player_start_center_distance, \\\n player_total_next_moves, player_max_path_length, player_path_count, \\\n opponent_min_center_diff)\n pro_opponent_vector = \\\n (opponent_legal_move_count, opponent_start_center_distance, \\\n opponent_total_next_moves, opponent_max_path_length, \\\n opponent_path_count, player_min_center_diff)\n \n # Provide a weighting vector for the features \n weight_vector = (1.5,0.1,1.0,0.001,0.001,0.001)\n \n # Calculate the return value = weighted difference of players' features\n weighted_difference_dot_product = sum(p*(q-r ) for p,q,r \\\n in zip(weight_vector, pro_player_vector, pro_opponent_vector))\n \n return float(weighted_difference_dot_product)",
"def _get_reward(self, player_score, opponent_score):\n return player_score - opponent_score",
"def _calculate_score_with_threshold(self):\n\n clue_number = 0\n positive_score, negative_score = 0, 0\n negative_number = 0\n total_score = 0\n\n # find largest negative score\n largest_negative_score = -1.\n for ix, (card, score) in enumerate(self.sorted_card_score_pairs):\n # find maximum score of negative word\n if card.color not in [self.team, \"DOUBLE\"]:\n largest_negative_score = score\n break\n\n # add scores higher than threshold + largest negative score to positive_score\n for card, score in self.sorted_card_score_pairs:\n if (score > (self.delta+largest_negative_score)\n and card.color in [self.team, \"DOUBLE\"]):\n clue_number += 1\n positive_score += score\n elif card.color not in [self.team, \"DOUBLE\"]:\n negative_score += score\n negative_number += 1\n else:\n continue\n\n if not self.penalize_negative:\n self.logger.info(\"negative score set to 0.\")\n negative_score = 0\n\n # if threshold(delta) is large, there will be no clues.\n # try to give at least one clue\n # select the positive card with score larger than largest_negative_score.\n if clue_number == 0:\n self.logger.debug(\"clue number: 0.\")\n for card, score in self.sorted_card_score_pairs:\n if card.color in [self.team, \"DOUBLE\"]:\n positive_score = score\n clue_number += 1\n self.cropped_threshold = score - largest_negative_score\n else:\n positive_score = 0\n break\n\n if self.normalize_negative:\n total_score = (1-self.alpha) * positive_score - self.alpha * negative_score / negative_number\n else:\n total_score = (1-self.alpha) * positive_score - self.alpha * negative_score\n self.logger.debug(\"word: {}, positive_score: {}, negative_score: {}, total_score: {}\".format(self.clue, positive_score, negative_score, total_score))\n return total_score, clue_number",
"def calculate_score(self):\n\n correct_award = 150\n turns_total = self.turns.count()\n turns_correct = self.turns.filter(is_match=True).count()\n seconds_left = (60.0 - (self.turns.last().created - self.turns.first().created).total_seconds()) or 0\n maxpoints = turns_correct * correct_award\n deduction_for_errors = correct_award * 0.11123\n\n maxpoints -= ((turns_total - turns_correct) * 2 * deduction_for_errors)\n maxpoints += seconds_left * 5.123214\n\n return Decimal(maxpoints)",
"def occluded(self) -> float:\n return self._coreEstimation.occludedFaceScore",
"def calc_match_points(self, match):\n if match.winner == match.TIE:\n match.home.tournament_score += 1\n match.away.tournament_score += 1\n else:\n match.winner.tournament_score += 3\n match.loser.tournament_score += 0",
"def custom_score(game, player):\n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n # First obtain the improved score\n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n improved = len(player_legal_moves) - len(opponent_legal_moves)\n if improved != 0:\n return float(improved)\n \n # Second get differences from center\n center_coordinates = center_y, center_x = get_center_coordinates(game)\n player_coordinates = game.get_player_location(player)\n opponent_coordinates = game.get_player_location(opponent)\n player_center_dist = get_distances_from_center(center_coordinates, player_coordinates)\n opponent_center_dist = get_distances_from_center(center_coordinates, opponent_coordinates)\n center_dist_diff = player_center_dist - opponent_center_dist\n \n # Third obtain next_moves\n player_next_moves = [get_next_moves(game, move, list(move)) for move in player_legal_moves]\n opponent_next_moves = [get_next_moves(game, move, list(move)) for move in opponent_legal_moves] \n improved_next = len(player_next_moves) - len(opponent_next_moves)\n \n # Put player and opponent feature differences in a tuple/vector surrogoate\n feature_diff_vector = (improved, center_dist_diff, improved_next)\n \n # Provide a weighting vector for the features of each player-participant\n weight_vector = (1.5,0.1,1.0)\n # Calculate the return value = weighted difference of players' features\n weighted_difference_dot_product = sum(p*q for p,q, \\\n in zip(feature_diff_vector, weight_vector))\n \n return float(weighted_difference_dot_product)",
"def custom_score_3(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player)\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player))\n if opp_moves == 0:\n return float(\"inf\")\n\n #Between 1-8\n return own_moves / opp_moves",
"def center_modified_score(game, player):\n w, h = game.width / 2., game.height / 2.\n y, x = game.get_player_location(player)\n\n return -float(max(np.abs(h - y), np.abs(w - x)))",
"def margin(self):\n sp = self.sale_price or zero\n if u.isempty(sp):\n return zero\n cp = self.cost_price or zero\n return u.decimal((um-(cp/sp))*cem, True)",
"def getReward (events_counters):\n global P_0, P_1, C_0, C_1, C_2 \n return (P_0 - C_0) * events_counters[0] - (C_0 + C_1) * events_counters[1] - (\n C_2 * events_counters[2] - P_1 * events_counters[3])",
"def _compute_reward(self):\n last_score = self.episode_qualities[-2]\n new_score = self.episode_qualities[-1]\n reward = new_score - last_score\n return reward"
] | [
"0.6575193",
"0.5990418",
"0.59229994",
"0.5904188",
"0.58731693",
"0.58324146",
"0.57912004",
"0.57143426",
"0.56908834",
"0.56435466",
"0.56371015",
"0.56231445",
"0.5613722",
"0.5583762",
"0.5572104",
"0.5566963",
"0.55475724",
"0.5513546",
"0.5509342",
"0.55083126",
"0.54720664",
"0.5455841",
"0.5433281",
"0.5418018",
"0.54056126",
"0.5383369",
"0.5374455",
"0.53702265",
"0.5364305",
"0.53634036"
] | 0.8493469 | 0 |
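A short worked example of the margin arithmetic in the record above, using made-up team ids and scores: the offense team's points minus the combined points of every other team in the score dict.

# illustrative values only; team ids and scores are assumptions
score_before_event = {1610612737: 52, 1610612738: 48}
offense_team_id = 1610612737
defense_points = sum(pts for tid, pts in score_before_event.items() if tid != offense_team_id)
margin = score_before_event[offense_team_id] - defense_points
assert margin == 4  # offense leads by 4 before the event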
returns dict with lineup ids for each team for current event. Lineup ids are hyphen separated sorted player id strings. | def lineup_ids(self):
lineup_ids = {}
for team_id, team_players in self.current_players.items():
players = [str(player_id) for player_id in team_players]
sorted_player_ids = sorted(players)
lineup_id = "-".join(sorted_player_ids)
lineup_ids[team_id] = lineup_id
return lineup_ids | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compile_lineups(players, pos, id_list, team):\n lu = []\n subs = []\n names = players[team]\n positions = pos[team]\n ids = id_list[team]\n for n in range(len(names)):\n if names[n][-1] == ' ':\n names[n] = names[n][0:-1]\n for i in range(0, len(names)):\n names[i] = names[i].replace('ñ', 'n')\n if '\\xa0' in names[i]:\n if not i == 0:\n if not '\\xa0' in names[i-1]:\n j = i + 1\n if not j >= len(names):\n while '\\xa0' in names[j]:\n j += 1\n if j >= len(names):\n break\n if not j >= len(names): \n sub_out = names[j]\n sub_out_id = ids[j]\n else:\n sub_out = names[i-1]\n sub_out_id = ids[i-1]\n else:\n j = i + 1\n if not j >= len(names):\n while '\\xa0' in names[j]:\n j += 1\n if j >= len(names):\n break\n if not j >= len(names): \n sub_out = names[j]\n sub_out_id = ids[j]\n subs.append(player.Player(names[i].replace('\\xa0', ''), ids[i], positions[i][0], positions[i][1:] if len(\n positions) > 1 else [], len(lu) + 1, sub_out.replace('\\xa0', ''), sub_out_id, 'available', team))\n else:\n lu.append(player.Player(names[i], ids[i], positions[i][0], positions[i][1:] if len(\n positions) > 1 else [], len(lu) + 1, '', '', 'entered', team))\n return {\"lineup\": lu, \"subs\": subs}",
"def teammates_player_ids(self):\n return [p.player_id for p in self.teammates]",
"def _parse_for_teams(self, event):\n return tuple(re.sub(ranking_regex, \"\", team.text.strip()).strip() for team in event.find_all(\"td\", {\"class\": \"col_teamname\"}))",
"def get_team_gk_ids(match_id):\n homeid, awayid, all = get_match_info(match_id)\n data = service_request(\"GetMatchSquad\", {\"matchId\": match_id})\n gks = {k: {\"team_id\": data.get(k).get(\"data\")[3],\n \"jersey_no\": data.get(k).get(\"data\")[1],\n \"player_id\": k}\n for k in data if data.get(k).get(\"data\")[4]==1 and data.get(k).get(\"data\")[2]==1}\n\n teams = {\n int(homeid): 0,\n int(awayid): 1\n }\n\n return {teams.get(gks.get(k).get(\"team_id\")):gks.get(k) for k in gks}",
"def get_lineups(self):\n [players, positions, ids] = scrape.get_lu_table(self.game_id)\n lu = compile_lineups(players, positions, ids, self.team)\n self.lineup = lu['lineup']\n self.subs = lu['subs']",
"def get_teams():",
"def get_id2line(self):\n id2line = {}\n id_index = 0\n text_index = 4\n with open(self.movie_lines_filepath, 'r', encoding='iso-8859-1') as f:\n for line in f:\n items = line.split(self.DELIM)\n if len(items) == 5:\n line_id = items[id_index]\n dialog_text = items[text_index].strip()\n dialog_text = clean_text(dialog_text)\n id2line[line_id] = dialog_text\n return id2line",
"def get_teams(event):\n teams_raw = tba_session.get(BASE_URL + '/event/%s/teams/keys' % event).json()\n teams = []\n for team_raw in teams_raw:\n teams.append(team_raw[3:])\n return teams",
"def get_offense_team_id(self):\n pass",
"def get_teams_at_match(event: str, match: int) -> typing.Optional[typing.List[int]]:\n \n match_data = get_match_data(event, match)\n if match_data == None:\n return None\n return [int(x.split('-')[1]) for x in match_data]",
"def team_id(self):\n return self._team_id",
"def get_current_lineups():\n out = []\n pf = players[players[\"team\"].isin(top30Teams)]\n for index, row in pf.iterrows():\n # Make sure that we only use player data where a player is\n # playing for their current team\n if(row[\"name\"] in top30Obj[row[\"team\"]]):\n out.append(row)\n return pd.DataFrame(out)",
"def map_team_ids(save=False):\n\tdir_ = os.path.join('data', 'archived_data', '2013')\n\tarch_teams = pd.read_csv(os.path.join(dir_, 'team.csv'))\n\tteam_ids = load_json(os.path.join('data', 'team_ids.json'))\n\tmapping = dict([(str(tid),\"\") for _,tid in team_ids.iteritems()])\n\tfor name, tid in team_ids.iteritems():\n\t\tixName = arch_teams['Name'].values == name\n\t\tif any(ixName):\n\t\t\tmapping[tid] = str(arch_teams['Team Code'].values[ixName][0])\n\tmapping = dict([(old,new) if old != \"\" else (\"old\"+new,new) for new,old in mapping.iteritems()])\n\tif save:\n\t\tdump_json(mapping, 'team_id_mapping.json', fdir=os.path.join('data', 'archived_data'))\n\treturn mapping",
"def get_team_id(self):\n try:\n return self.profile_data[\"proTeamId\"]\n except Exception as e:\n error_msg = (\"Failed to retrieve the player's team id: {}\"\n \"\".format(str(e)))\n raise PlayerDataException(error_msg)",
"def create_lineups(self):\n\t\tnum_skaters = len(self.skaters_df.index)\n\t\tnum_goalies = len(self.goalies_df.index)\n\t\tteams = list(set(self.skaters_df['team'].values))\n\t\tnum_teams = len(teams)\n\n\t\t#Create player position indicators so you know which position they are playing\n\t\tpositions = {'C':[], 'W':[], 'D':[]}\n\t\tfor pos in self.skaters_df.loc[:, 'pos']:\n\t\t\tfor key in positions:\n\t\t\t\tpositions[key].append(1 if key in pos else 0)\n\t\t\n\t\t#Create player line indicators so you know which line by their team they are on\n\t\tteam_lines = []\n\t\tfor i, line in enumerate(self.skaters_df.loc[:, 'line']):\n\t\t\tplayer_line = []\n\t\t\tif int(line) == 1:\n\t\t\t\tplayer_line.extend((1, 0, 0, 0))\n\t\t\telif int(line) == 2:\n\t\t\t\tplayer_line.extend((0, 1, 0, 0))\n\t\t\telif int(line) == 3:\n\t\t\t\tplayer_line.extend((0, 0, 1, 0))\n\t\t\telif int(line) == 4:\n\t\t\t\tplayer_line.extend((0, 0, 0, 1))\n\t\t\telse:\n\t\t\t\tplayer_line.extend((0, 0, 0, 0))\n\t\t\tplayer_lines = []\n\t\t\tfor team in teams:\n\t\t\t\tif self.skaters_df.loc[i, 'team'] == team:\n\t\t\t\t\tplayer_lines.extend(player_line)\n\t\t\t\telse:\n\t\t\t\t\tplayer_lines.extend((0, 0, 0, 0))\n\t\t\tteam_lines.append(player_lines)\n\t\tnum_lines = len(team_lines[0])\n\t\t\n\t\t#NOTE: Maybe add PP line indicators\n\n\t\t#Create player team indicators so you know which team they are on\n\t\tskaters_teams = []\n\t\tfor player_team in self.skaters_df.loc[:, 'team']:\n\t\t\tskaters_teams.append([1 if player_team == team else 0 for team in teams])\n\n\t\t#Create goalie opponent indicators so you know who the goalie is opposing\n\t\tgoalies_opponents = []\n\t\tfor player_opp in self.skaters_df.loc[:, 'opp']:\n\t\t\tgoalies_opponents.append([1 if player_opp == team else 0 for team in self.goalies_df.loc[:, 'team']])\n\n\t\t#Generate the lineups\n\t\tlineups = []\n\t\tfor _ in tqdm(range(1, self.num_lineups+1)):\n\t\t\tlineup = self.type_1(lineups, positions, team_lines, skaters_teams, goalies_opponents, num_skaters, num_goalies, num_teams, num_lines)\n\t\t\tif lineup:\n\t\t\t\tlineups.append(lineup)\n\t\t\telse:\n\t\t\t\tbreak\n\n\t\t#Fill the lineups with player names\n\t\tself.fill_lineups(lineups, positions, num_skaters, num_goalies)",
"def scrape_tournament_lineups(competition_name, tournament_id, edition_ids):\n\n l = []\n for edition_id in edition_ids:\n urls = scrape_fifa_scoreboard(tournament_id, edition_id)\n for url in urls:\n l.extend(scrape_fifa_lineups(url, competition_name))\n return l",
"def team_map():\n teams = Data.find_all_teams()\n res = {t[0]: t[-1] for t in teams}\n return jsonify(res)",
"def create_players_id_dict(self) -> list:\n players_id = []\n self.show_players()\n print(\"\\n\" + \"Enter id of wanted players : \")\n while len(players_id) < 8:\n while True:\n id_choice = check.request_id(PLAYERS)\n if check.check_not_same_value(players_id, id_choice) is True:\n players_id.append(id_choice)\n break\n return players_id",
"def line_inj_odds(team_name):\n\n team_lineups = [[], []]\n team_injuries = [[], []]\n betting_data = ['N/A', 'N/A', 'N/A']\n\n response = Selector(text=requests.get(\"https://www.rotowire.com/basketball/nba-lineups.php\").text)\n all_games = response.xpath('.//div[@class=\"lineup is-nba\"]')\n\n # Find game box that contains correct game\n game_idx = None\n\n for i, game in enumerate(all_games):\n away = game.xpath('./div[2]/div[2]/a[1]/text()').get().strip()\n home = game.xpath('./div[2]/div[2]/a[2]/text()').get().strip()\n\n if home == team_name or away == team_name:\n game_idx = i\n\n if game_idx is None: # Return out of function if game not found\n return team_lineups, team_injuries, betting_data\n\n game = all_games[game_idx]\n\n for side in [0, 1]:\n lineups = game.xpath(f'./div[2]/div[3]/ul[{side + 1}]')\n lineup_len = game.xpath(f'./div[2]/div[3]/ul[{side + 1}]/li')\n\n for player in range(2, 7):\n pos = lineups.xpath(f'./li[{player}]/div[1]/text()').get()\n name = lineups.xpath(f'./li[{player}]/a[1]/text()').get()\n des = lineups.xpath(f'./li[{player}]/span[1]/text()').get()\n\n if des:\n name = f'{name} - *{des}*'\n team_lineups[side].append((pos, name))\n\n for injury in range(8, len(lineup_len) + 1):\n name = lineups.xpath(f'./li[{injury}]/a[1]/text()').get()\n des = lineups.xpath(f'./li[{injury}]/span[1]/text()').get()\n if name and des:\n team_injuries[side].append((name, des))\n\n ml = game.xpath(f'.//div[@class=\"lineup__odds is-row\"]/div[1]/span[1]/text()').get()\n spread = game.xpath(f'.//div[@class=\"lineup__odds is-row\"]/div[2]/span[1]/text()').get()\n ou = game.xpath(f'.//div[@class=\"lineup__odds is-row\"]/div[3]/span[1]/text()').get()\n ou = ou.strip(' Pts')\n\n betting_data = [ml, spread, ou]\n\n return team_lineups, team_injuries, betting_data",
"def make_player_stats(tournament, lineups):\n positions = find_player_positions(lineups)\n positions = positions.set_index('player_id')[['player_position']]\n player_stats = load_player_stats(tournament)\n player_stats.set_index('player_id', inplace=True)\n mask = player_stats['goals'] > player_stats['shots']\n player_stats.loc[mask, 'shots'] = player_stats[mask]['goals']\n res = player_stats.join(positions)\n res = res[pd.notna(res['player_position'])]\n return res",
"def get_past_matches_data(team):\n matches = team.get_past_matches()\n match_list = []\n for match in matches:\n match_dict = {}\n match_dict['match_date'] = match.match_date\n match_dict['match_name'] = match.__str__()\n match_dict['id'] = match.id\n innings = match.get_innings()\n if len(innings):\n if innings[0].runs > innings[1].runs:\n match_dict['winner_team'] = innings[0].bat_team\n match_dict['win_margin'] = innings[0].runs - innings[1].runs\n match_dict['win_type'] = 'Runs'\n match_dict['winner_score'] = str(innings[0].runs) + '/' + str(innings[0].wickets)\n else:\n match_dict['winner_team'] = innings[1].bat_team\n match_dict['win_margin'] = 10 - innings[1].wickets\n match_dict['win_type'] = 'Wickets'\n match_dict['winner_score'] = str(innings[1].runs) + '/' + str(innings[1].wickets)\n match_list.append(match_dict)\n return match_list",
"def get_ipl_player_to_users_mapping(teams=None):\n\n ipl_players = defaultdict(list)\n for user_id in USER_IDS:\n for player in get_squad_details(user_id)['players']:\n player_details = get_player(player)\n if teams and not player_details['team'] in teams:\n continue\n ipl_players[player_details.name].append(\n get_league_team_name_for_user(user_id))\n\n return ipl_players",
"def process_team(this_team, nodes, file_obj_out):\n for i, player_one in enumerate(this_team):\n for j, player_two in enumerate(this_team):\n if j > i and nodes[player_one] != nodes[player_two]:\n # write the source id and target id to file\n print(nodes[player_one], nodes[player_two],\n player_one + \" - \" + player_two,\n sep=',', file=file_obj_out)",
"def scrape_fifa_lineups(url, competition):\n # Not quite finished.\n\n data = scrape_url(url)\n data = data.split(\"<h2>Advertisement</h2>\")[0]\n soup = BeautifulSoup(data)\n\n game_data = scrape_fifa_game(url, competition)\n\n def process_lineup(rows, team):\n process_name = lambda s: s.strip().replace(\"(C)\", '').replace(\"(GK)\", '').title()\n\n l = []\n starters = rows[:11]\n subs = rows[11:]\n\n if team == game_data['team1']:\n goals_for, goals_against = game_data['team1_score'], game_data['team2_score']\n elif team == game_data['team2']:\n goals_for, goals_against = game_data['team2_score'], game_data['team1_score']\n else:\n import pdb; pdb.set_trace()\n\n lineup_re = re.compile(\"(.*?)\\(-(\\d+)'(?: Ht)?\\)\")\n\n\n\n # Doesn't handle multiple subs yet.\n for starter in starters:\n starter = get_contents(starter)\n \n m = lineup_re.search(starter)\n\n if m:\n name, off = m.groups()\n else:\n name = starter\n off = 'end'\n off = 90\n\n l.append({\n 'name': process_name(name),\n 'on': 0,\n 'off': int(off),\n 'team': team, \n 'competition': competition,\n 'season': game_data['season'],\n 'date': game_data['date'],\n 'source': url,\n 'goals_for': goals_for,\n 'goals_against': goals_against,\n })\n\n for sub in subs:\n\n sub = get_contents(sub)\n\n # Clean up these mysterious dudes.\n # Diego LOPEZ (+69')(-69')\n #m = re.search(\"(.*?)\\(\\+(\\d+)'\\)(\\-(\\d+)'\\)\", sub)\n m = False\n if m:\n print(\"Confusing appearances %s\" % sub)\n name, _, _ = m.groups()\n\n m = lineup_re.search(sub)\n if m:\n name, on = m.groups()\n off = 90\n else:\n m = lineup_re.search(sub)\n if m:\n name, off = m.groups()\n on = 90\n else:\n name = sub\n on = off = 0\n\n if m:\n l.append({\n 'name': process_name(name),\n 'on': int(on),\n 'off': off,\n 'team': team, \n 'competition': competition,\n 'season': game_data['season'],\n 'date': game_data['date'],\n 'source': url\n })\n\n return l\n \n number_re = re.compile(\"\\[\\d+\\]\")\n rows = [e for e in rows if e.strip()]\n rows = [e for e in rows if not number_re.search(e)]\n return rows\n\n home = soup.find(\"div\", \"lnupTeam\").findAll(\"span\")\n away = soup.find(\"div\", \"lnupTeam away\").findAll(\"span\")\n \n home_lineup = process_lineup(home, game_data['team1'])\n away_lineup = process_lineup(away, game_data['team2'])\n\n return home_lineup + away_lineup",
"def get_existing_psn_team_and_group_ids():\n\tudb = UserPageDB()\n\ttry:\n\t\tpsns = udb.existing_psn_team_and_group_ids()\n\t\treturn [(p[0],p[1]) for p in psns]\n\tfinally:\n\t\tudb.close()",
"def extract_team_data(game_data):\n team_id = game_data[1]\n team_abbrev = game_data[2]\n team_name = game_data[3]\n total_minutes_all_players = game_data[8]\n field_goals_made = game_data[9]\n field_goals_attempted = game_data[10]\n three_ptr_made = game_data[12]\n three_ptr_attempted = game_data[13]\n free_throws_made = game_data[15]\n free_throws_attempted = game_data[16]\n offensive_rebounds = game_data[18]\n defensive_rebounds = game_data[19]\n rebounds = game_data[20]\n assists = game_data[21]\n steals = game_data[22]\n blocks = game_data[23]\n turnovers = game_data[24]\n personal_fouls = game_data[25]\n points = game_data[26]\n plus_minus = game_data[27]\n\n final_dict = {\n \"team_id\": team_id,\n \"team_abbrev\": team_abbrev,\n \"team_name\": team_name,\n \"total_minutes_all_players\": total_minutes_all_players,\n \"field_goals_made\": field_goals_made,\n \"field_goals_attempted\": field_goals_attempted,\n \"three_ptr_made\": three_ptr_made,\n \"three_ptr_attempted\": three_ptr_attempted,\n \"free_throws_made\": free_throws_made,\n \"free_throws_attempted\": free_throws_attempted,\n \"offensive_rebounds\": offensive_rebounds,\n \"defensive_rebounds\": defensive_rebounds,\n \"rebounds\": rebounds,\n \"assists\": assists,\n \"steals\": steals,\n \"blocks\": blocks,\n \"turnovers\": turnovers,\n \"personal_fouls\": personal_fouls,\n \"points\": points,\n \"plus_minus\": plus_minus\n }\n return final_dict",
"def get_single_game_team_data(game, grouped_shot_data, pp_sit_data):\n game_stat_lines = list()\n game_id = game['game_id']\n home_id = game['home_id']\n road_id = game['road_id']\n game_type = get_game_type_from_season_type(game)\n\n home_stats_src_path = os.path.join(\n CONFIG['base_data_dir'], 'game_team_stats',\n str(game['season']), str(game_type), \"%d_%d.json\" % (game_id, home_id))\n road_stats_src_path = os.path.join(\n CONFIG['base_data_dir'], 'game_team_stats',\n str(game['season']), str(game_type), \"%d_%d.json\" % (game_id, road_id))\n\n # loading raw team game stats (if available)\n raw_stats = dict()\n if os.path.isfile(home_stats_src_path):\n raw_stats['home'] = json.loads(open(home_stats_src_path).read())\n else:\n raw_stats['home'] = dict()\n if os.path.isfile(road_stats_src_path):\n raw_stats['road'] = json.loads(open(road_stats_src_path).read())\n else:\n raw_stats['road'] = dict()\n\n # counting penalties per team\n penalty_counts = get_penalty_counts(game)\n\n for key in ['home', 'road']:\n opp_key = 'road' if key == 'home' else 'home'\n game_stat_line = dict()\n # basic game information\n game_stat_line['game_date'] = game['date']\n game_stat_line['weekday'] = game['weekday']\n game_stat_line['season'] = game['season']\n game_stat_line['season_type'] = game['season_type']\n game_stat_line['round'] = game['round']\n game_stat_line['game_id'] = game_id\n game_stat_line['team_id'] = game[\"%s_id\" % key]\n game_stat_line['team'] = game[\"%s_abbr\" % key]\n game_stat_line['opp_team_id'] = game[\"%s_id\" % opp_key]\n game_stat_line['opp_team'] = game[\"%s_abbr\" % opp_key]\n # identifying team's and opposing team's division (if\n # applicable for current season and season type)\n if (game['season'], game['season_type']) in divisions:\n current_divisions = divisions[game['season'], game['season_type']]\n game_stat_line['division'] = current_divisions[game_stat_line['team']]\n game_stat_line['opp_division'] = current_divisions[game_stat_line['opp_team']]\n # TODO: reactivate when schedule game id is available again\n # game_stat_line['schedule_game_id'] = game['schedule_game_id']\n game_stat_line['arena'] = correct_name(game['arena'])\n game_stat_line['attendance'] = game['attendance']\n if game_stat_line['arena'] in capacities:\n game_stat_line['capacity'] = capacities[game_stat_line['arena']]\n else:\n print(\n \"\\t+ Unable to retrieve capacity \" +\n \"for '%s'\" % game_stat_line['arena'])\n game_stat_line['capacity'] = 0\n # coaches and referees\n if \"%s_coach\" % key in game:\n game_stat_line['coach'] = correct_name(\n game[\"%s_coach\" % key], game['date'])\n if game_stat_line['coach'] not in coaches:\n print(\"+ Unknown coach '%s'\" % game_stat_line['coach'])\n else:\n print(\"\\t+ No coach information found for %s in game %d\" % (\n game_stat_line['team'], game_id))\n game_stat_line['coach'] = correct_name(\n \"%d_%s\" % (game_id, game_stat_line['team']))\n print(\"\\t+ Adjusted to '%s'\" % game_stat_line['coach'])\n if \"%s_coach\" % opp_key in game:\n game_stat_line['opp_coach'] = correct_name(\n game[\"%s_coach\" % opp_key], game['date'])\n if game_stat_line['opp_coach'] not in coaches:\n print(\"+ Unknown coach '%s'\" % game_stat_line['opp_coach'])\n else:\n print(\n \"\\t+ No opposition coach information found \" +\n \"for %s in game %d\" % (game_stat_line['opp_team'], game_id))\n game_stat_line['opp_coach'] = correct_name(\n \"%d_%s\" % (game_id, game_stat_line['opp_team']))\n print(\"\\t+ Adjusted to '%s'\" % game_stat_line['opp_coach'])\n 
game_stat_line['ref_1'] = correct_name(game['referee_1'])\n game_stat_line['ref_2'] = correct_name(game['referee_2'])\n game_stat_line['lma_1'] = correct_name(game['linesman_1'])\n game_stat_line['lma_2'] = correct_name(game['linesman_2'])\n # outcomes\n game_stat_line['games_played'] = 1\n game_stat_line['home_road'] = key\n game_stat_line['score'] = game[\"%s_score\" % key]\n game_stat_line['goals'] = game[\"%s_score\" % key]\n game_stat_line['opp_score'] = game[\"%s_score\" % opp_key]\n game_stat_line['opp_goals'] = game[\"%s_score\" % opp_key]\n # optionally correcting game scores\n if game_id in game_score_corrections:\n for team_abbr in game_score_corrections[game_id]:\n if game_stat_line['team'] == team_abbr:\n game_stat_line['score'] = game_score_corrections[game_id][team_abbr]\n if game_stat_line['opp_team'] == team_abbr:\n game_stat_line['opp_score'] = game_score_corrections[game_id][team_abbr]\n if game['shootout_game']:\n game_stat_line['game_type'] = 'SO'\n elif game['overtime_game']:\n game_stat_line['game_type'] = 'OT'\n else:\n game_stat_line['game_type'] = ''\n for gsl_key in ['w', 'rw', 'ow', 'sw', 'l', 'rl', 'ol', 'sl']:\n game_stat_line[gsl_key] = 0\n if game_stat_line['score'] > game_stat_line['opp_score']:\n game_stat_line['w'] += 1\n if game['shootout_game']:\n game_stat_line['sw'] += 1\n game_stat_line['goals'] -= 1\n elif game['overtime_game']:\n game_stat_line['ow'] += 1\n else:\n game_stat_line['rw'] += 1\n else:\n game_stat_line['l'] += 1\n if game['shootout_game']:\n game_stat_line['sl'] += 1\n game_stat_line['opp_goals'] -= 1\n elif game['overtime_game']:\n game_stat_line['ol'] += 1\n else:\n game_stat_line['rl'] += 1\n game_stat_line['points'] = (\n game_stat_line['rw'] * 3 + game_stat_line['ow'] * 2 +\n game_stat_line['sw'] * 2 + game_stat_line['sl'] * 1 +\n game_stat_line['ol'] * 1)\n # per-period goals\n for period in [1, 2, 3]:\n game_stat_line[\"goals_%d\" % period] = game[\n \"%s_goals_%d\" % (key, period)]\n game_stat_line[\"opp_goals_%d\" % period] = game[\n \"%s_goals_%d\" % (opp_key, period)]\n # empty-net and extra-attacker goals\n game_stat_line['en_goals'] = game[\"%s_en_goals\" % key]\n game_stat_line['ea_goals'] = game[\"%s_ea_goals\" % key]\n game_stat_line['opp_en_goals'] = game[\"%s_en_goals\" % opp_key]\n game_stat_line['opp_ea_goals'] = game[\"%s_ea_goals\" % opp_key]\n # situation after 20 and 40 minutes respectively\n for situation in [\n 'tied20', 'lead20', 'trail20', 'tied40', 'lead40', 'trail40'\n ]:\n game_stat_line[situation] = False\n if game_stat_line['goals_1'] == game_stat_line['opp_goals_1']:\n game_stat_line['tied20'] = True\n elif game_stat_line['goals_1'] > game_stat_line['opp_goals_1']:\n game_stat_line['lead20'] = True\n else:\n game_stat_line['trail20'] = True\n goals40 = game_stat_line['goals_1'] + game_stat_line['goals_2']\n opp_goals40 = (\n game_stat_line['opp_goals_1'] + game_stat_line['opp_goals_2'])\n if goals40 == opp_goals40:\n game_stat_line['tied40'] = True\n elif goals40 > opp_goals40:\n game_stat_line['lead40'] = True\n else:\n game_stat_line['trail40'] = True\n # scored first?\n if game['first_goal'] == game_stat_line['team']:\n game_stat_line['scored_first'] = True\n game_stat_line['trailed_first'] = False\n elif game['first_goal'] == game_stat_line['opp_team']:\n game_stat_line['scored_first'] = False\n game_stat_line['trailed_first'] = True\n # one-goal, two-goal, three-goal, four-goal-game?\n for goal_game in ['one_goal', 'two_goal', 'three_goal', 'four_goal']:\n game_stat_line[goal_game] = False\n 
score_diff = abs(\n (game_stat_line['score'] - game_stat_line['en_goals']) -\n (game_stat_line['opp_score'] - game_stat_line['opp_en_goals']))\n # in case the right amount of empty-net goals have been scored, we\n # may end up with a score differential of zero, see game between STR\n # and ING on Mar 3, 2019\n if not score_diff:\n game_stat_line['zero_goal'] = True\n if score_diff == 1:\n game_stat_line['one_goal'] = True\n elif score_diff == 2:\n game_stat_line['two_goal'] = True\n elif score_diff == 3:\n game_stat_line['three_goal'] = True\n elif score_diff > 3:\n game_stat_line['four_goal'] = True\n\n # retrieving score state time spans for current team\n game_stat_line['time_played'] = game['time_played']\n game_stat_line['tied'] = game['tied']\n game_stat_line['tied_pctg'] = round(\n game['tied'] / game['time_played'] * 100, 2)\n if key == 'home':\n game_stat_line['leading'] = game['home_leading']\n game_stat_line['trailing'] = game['road_leading']\n else:\n game_stat_line['leading'] = game['road_leading']\n game_stat_line['trailing'] = game['home_leading']\n game_stat_line['leading_pctg'] = round(\n game_stat_line['leading'] / game['time_played'] * 100, 2)\n game_stat_line['trailing_pctg'] = round(\n game_stat_line['trailing'] / game['time_played'] * 100, 2)\n\n # retrieving raw stats for team and opposing team\n for category, raw_category in RAW_STATS_MAPPING:\n game_stat_line[category] = raw_stats[key].get(raw_category, None)\n game_stat_line[\"opp_%s\" % category] = raw_stats[opp_key].get(raw_category, None)\n\n # checking number of power play goals retrieved from team stats with those registered in event data\n game_stat_line = check_pp_goals(game, key, opp_key, game_stat_line)\n\n # calculating shooting percentages\n if game_stat_line['shots_on_goal']:\n game_stat_line['shot_pctg'] = round(\n game_stat_line['goals'] /\n game_stat_line['shots_on_goal'] * 100., 2)\n else:\n game_stat_line['shot_pctg'] = None\n if game_stat_line['opp_shots_on_goal']:\n game_stat_line['opp_shot_pctg'] = round(\n game_stat_line['opp_goals'] /\n game_stat_line['opp_shots_on_goal'] * 100., 2)\n else:\n game_stat_line['opp_shot_pctg'] = None\n # calculating save percentages\n if game_stat_line['opp_shots_on_goal']:\n game_stat_line['save_pctg'] = round(\n 100 - game_stat_line['opp_goals'] /\n game_stat_line['opp_shots_on_goal'] * 100., 2)\n else:\n game_stat_line['save_pctg'] = None\n if game_stat_line['shots_on_goal']:\n game_stat_line['opp_save_pctg'] = round(\n 100 - game_stat_line['goals'] /\n game_stat_line['shots_on_goal'] * 100., 2)\n else:\n game_stat_line['opp_save_pctg'] = None\n # calculating pdo values\n if (\n game_stat_line['shot_pctg'] is not None and\n game_stat_line['save_pctg'] is not None\n ):\n game_stat_line['pdo'] = round((\n game_stat_line['shot_pctg'] +\n game_stat_line['save_pctg']), 1)\n game_stat_line['opp_pdo'] = round((\n game_stat_line['opp_shot_pctg'] +\n game_stat_line['opp_save_pctg']), 1)\n # calculating power play percentages\n if game_stat_line['pp_opps']:\n game_stat_line['pp_pctg'] = round((\n game_stat_line['pp_goals'] /\n game_stat_line['pp_opps']) * 100., 1)\n else:\n game_stat_line['pp_pctg'] = 0\n if game_stat_line['opp_pp_opps']:\n game_stat_line['opp_pp_pctg'] = round((\n game_stat_line['opp_pp_goals'] /\n game_stat_line['opp_pp_opps']) * 100., 1)\n else:\n game_stat_line['opp_pp_pctg'] = 0\n # calculating penalty killing percentages\n if game_stat_line['sh_opps']:\n game_stat_line['pk_pctg'] = round(\n 100 - game_stat_line['opp_pp_goals'] /\n 
game_stat_line['sh_opps'] * 100., 1)\n else:\n game_stat_line['pk_pctg'] = 0\n if game_stat_line['opp_sh_opps']:\n game_stat_line['opp_pk_pctg'] = round(\n 100 - game_stat_line['pp_goals'] /\n game_stat_line['opp_sh_opps'] * 100., 1)\n else:\n game_stat_line['opp_pk_pctg'] = 0\n game_stat_line['ev_goals'] = (\n game_stat_line['goals'] -\n game_stat_line['pp_goals'] -\n game_stat_line['sh_goals'])\n game_stat_line['opp_ev_goals'] = (\n game_stat_line['opp_goals'] -\n game_stat_line['opp_pp_goals'] -\n game_stat_line['opp_sh_goals'])\n # faceoffs are treated separately since each of the team game stats\n # datasets only contains the number of won faceoffs and sometimes this\n # one is stored as a string (wtf?)\n game_stat_line['faceoffs_won'] = int(\n raw_stats[key].get('faceOffsWon', 0))\n game_stat_line['faceoffs_lost'] = int(\n raw_stats[opp_key].get('faceOffsWon', 0))\n # calculating overall number of faceoffs and faceoff percentage\n game_stat_line['faceoffs'] = (\n game_stat_line['faceoffs_won'] + game_stat_line['faceoffs_lost'])\n if game_stat_line['faceoffs']:\n game_stat_line['faceoff_pctg'] = round(\n game_stat_line['faceoffs_won'] /\n game_stat_line['faceoffs'] * 100., 1)\n else:\n game_stat_line['faceoff_pctg'] = 0.\n # best players\n game_stat_line['best_plr_id'] = game.get(\n \"%s_best_player_id\" % key, None)\n game_stat_line['best_plr'] = game.get(\"%s_best_player\" % key, None)\n game_stat_line['opp_best_plr_id'] = game.get(\n \"%s_best_player_id\" % opp_key, None)\n game_stat_line['opp_best_plr'] = game.get(\n \"%s_best_player\" % opp_key, None)\n # game-winning-goal\n game_stat_line['gw_goal_team'] = game['gw_goal']\n game_stat_line['gw_goal_player_id'] = game['gw_goal_player_id']\n game_stat_line['gw_goal_first_name'] = game['gw_goal_first_name']\n game_stat_line['gw_goal_last_name'] = game['gw_goal_last_name']\n\n shot_zones_to_retain = ['slot', 'left', 'right', 'blue_line']\n shot_situations_to_retain = [\n 'shots_ev', 'shots_5v5', 'shots_pp', 'shots_sh', 'shots_unblocked',\n 'shots_unblocked_ev', 'shots_unblocked_5v5', 'shots_unblocked_pp',\n 'shots_unblocked_sh', 'shots_on_goal_ev', 'shots_on_goal_5v5',\n 'shots_on_goal_pp', 'shots_on_goal_sh', 'goals_5v5', 'hit_post']\n\n # retrieving shot data for current game and team\n shot_data = grouped_shot_data.get(\n (game_id, game_stat_line['team']), list())\n for item in shot_data:\n if item.startswith(tuple(shot_zones_to_retain)):\n abbr_item = item\n for zone_key, replacement in SHOT_ZONE_ABBREVIATIONS.items():\n abbr_item = abbr_item.replace(zone_key, replacement)\n game_stat_line[abbr_item] = shot_data[item]\n elif item in shot_situations_to_retain:\n game_stat_line[item] = shot_data[item]\n\n # retrieving shots against data for current game and team\n shot_against_data = grouped_shot_data.get(\n (game_id, game_stat_line['opp_team']), list())\n for item in shot_against_data:\n if item.startswith(tuple(shot_zones_to_retain)):\n abbr_item = item\n for zone_key, replacement in SHOT_ZONE_ABBREVIATIONS.items():\n abbr_item = abbr_item.replace(zone_key, replacement)\n game_stat_line[\"%s_a\" % abbr_item] = shot_against_data[item]\n elif item in shot_situations_to_retain:\n game_stat_line[\"opp_%s\" % item] = shot_against_data[item]\n\n try:\n game_stat_line['ev_cf_pctg'] = round(\n game_stat_line['shots_ev'] / (game_stat_line['shots_ev'] + game_stat_line['opp_shots_ev']) * 100, 2)\n except KeyError:\n print(\"\\t+Unable to calculate even strength shots for percentage\")\n game_stat_line['ev_cf_pctg'] = None\n\n for 
penalty_duration in [2, 5, 10, 20]:\n if penalty_counts[key] and penalty_duration in penalty_counts[key]:\n game_stat_line[\"penalty_%d\" % penalty_duration] = (\n penalty_counts[key][penalty_duration])\n else:\n game_stat_line[\"penalty_%d\" % penalty_duration] = 0\n\n game_stat_line['pp_5v4'] = pp_sit_data[key]['pp_sits']['5v4']\n game_stat_line['pp_5v3'] = pp_sit_data[key]['pp_sits']['5v3']\n game_stat_line['pp_4v3'] = pp_sit_data[key]['pp_sits']['4v3']\n game_stat_line['ppg_5v4'] = pp_sit_data[key]['pp_goals']['5v4']\n game_stat_line['ppg_5v3'] = pp_sit_data[key]['pp_goals']['5v3']\n game_stat_line['ppg_4v3'] = pp_sit_data[key]['pp_goals']['4v3']\n game_stat_line['opp_pp_5v4'] = pp_sit_data[opp_key]['pp_sits']['5v4']\n game_stat_line['opp_pp_5v3'] = pp_sit_data[opp_key]['pp_sits']['5v3']\n game_stat_line['opp_pp_4v3'] = pp_sit_data[opp_key]['pp_sits']['4v3']\n game_stat_line['opp_ppg_5v4'] = pp_sit_data[opp_key]['pp_goals']['5v4']\n game_stat_line['opp_ppg_5v3'] = pp_sit_data[opp_key]['pp_goals']['5v3']\n game_stat_line['opp_ppg_4v3'] = pp_sit_data[opp_key]['pp_goals']['4v3']\n\n # opp_diff = game_stat_line['pp_opps'] - (\n # game_stat_line['pp_5v4'] +\n # game_stat_line['pp_5v3'] +\n # game_stat_line['pp_4v3']\n # )\n # if opp_diff:\n # print(\"\\tpp opp discrepancy of %d for %s\" % (opp_diff, key))\n\n # registering shootout stats (if applicable)\n shootout_stats = get_shootout_stats(game, key, opp_key)\n if shootout_stats:\n game_stat_line = {**game_stat_line, **shootout_stats}\n\n game_stat_lines.append(game_stat_line)\n\n return game_stat_lines",
"def team_names(root):\n\t\thomebranch = root[0][0].find('{http://feed.elasticstats.com/schema/soccer/sr/v2/matches-summary.xsd}home')\n\t\thomename = (homebranch.attrib.get('alias'))\n\t\thomeid = (homebranch.attrib.get('id'))\n\n\n\t\tawaybranch = root[0][0].find('{http://feed.elasticstats.com/schema/soccer/sr/v2/matches-summary.xsd}away')\n\t\tawayname = (awaybranch.attrib.get('alias'))\n\t\tawayid = (awaybranch.attrib.get('id'))\n\n\t\tteamnames = {homeid:homename, awayid:awayname}\n\t\treturn(teamnames)",
"def winning_team(self):\n return self.team_id",
"def get_players_id(player_number):\n database = TinyDB('db.json')\n players_table = database.table('players')\n # getting the last eight players\n id_list = []\n for i in range(1, player_number + 1):\n # getting player\n data = players_table.all()[-i]\n # Obtaining a user ID\n id_list.append(data.doc_id)\n return id_list"
] | [
"0.63278043",
"0.5798467",
"0.57650906",
"0.576339",
"0.54533684",
"0.54256725",
"0.5386581",
"0.53313106",
"0.5272173",
"0.5270027",
"0.5256748",
"0.52523595",
"0.52510047",
"0.5233039",
"0.5219826",
"0.52058655",
"0.5196101",
"0.51753414",
"0.5173734",
"0.5145153",
"0.51366085",
"0.5135135",
"0.5120815",
"0.50894344",
"0.5076575",
"0.50641",
"0.5015501",
"0.50060034",
"0.49843174",
"0.498337"
] | 0.8228265 | 0 |
returns the number of seconds that have elapsed since the previous event | def seconds_since_previous_event(self):
if self.previous_event is None:
return 0
if self.seconds_remaining == 720:
# so that subs between periods for live don't have negative seconds
return 0
if self.seconds_remaining == 300 and self.period > 4:
# so that subs between periods for live don't have negative seconds
return 0
return self.previous_event.seconds_remaining - self.seconds_remaining | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def time_since_last_state_change(self):\n current_time = rospy.get_rostime()\n difference = current_time - self._timestamps['last_state_change']\n return difference.to_sec()",
"def elapsed():\n global start_time\n return time.time() - start_time",
"def elapsed(self):\n return self.__last_time() - self.__start",
"def time_remaining(self) -> float:\n\n return self.event.time - time.time()",
"def get_time_taken_sec(self) -> float:\n return self.time_stop - self.time_start",
"def secondsPassed(self)->int:\n return 0 if not self.used else int((datetime.utcnow() - self.firstAccessDate).total_seconds())",
"def elapsed(self):\n return datetime.datetime.now() - self.start",
"def seconds_from_last_update(self):\n return (datetime.utcnow() - self.last_update_datetime).total_seconds()",
"def time_passed(self):\n return (datetime.now(timezone.utc) - self._time_run).total_seconds()",
"def get_elapsed_seconds():\n\tutcnow = datetime.utcnow()\n\tmidnight_utc = datetime.combine(utcnow.date(), time(0))\n\tdelta = utcnow - midnight_utc\n\treturn delta.total_seconds()",
"def get_elapsed_time(self):\n return self.app.get_elapsed_time() - self._pause_time",
"def get_elapsed(self):\n delta = self._now() - self.start\n return delta.microseconds / 1000.0",
"def get_elapsed_time(self):\n if hasattr(self, 'starttime'):\n return monotonic() - self.starttime\n else:\n return 0",
"def duration(self):\r\n return self.stop - self.start",
"def secondsPassed(self)->int:\n return self._lic.params['sessionTimeUsed'].value",
"def duration(self):\r\n return self.t2 - self.t1",
"def duration(self):\n if self._exc_end and self._inc_begin:\n return self._exc_end - self._inc_begin\n return 0",
"def seconds_remaining(self):\n pass",
"def elapsed(self):\n return str(datetime.datetime.now() - self.start).split('.')[0]",
"def elapsed_time(self) -> float:\n current_time = datetime.utcnow()\n start = self.start_time or current_time\n end = self.end_time or current_time\n return (end - start).total_seconds()",
"def seconds(self):\n end = self.end or timezone.now()\n result = end - self.start\n return result.seconds",
"def total_seconds(self):\n return 0",
"def elapsed_time_in_seconds(self):\n return self._elapsed_time_in_seconds",
"def time(self):\n return pygame.time.get_ticks() - self.start_time",
"def elapsed_time(self):\n # reset timer if game is not started\n if not self.started:\n self.timestamp = None\n return 0\n # sets the first timer\n if self.timestamp is None:\n self.timestamp = time.time()\n return 0\n # if there is a previous timer check elapsed time\n else:\n elapsed_time = time.time() - self.timestamp\n # if elapsed_time is larger than the maximum time, reset timer\n if elapsed_time >= self.max_time:\n self.timestamp = self.max_time\n return elapsed_time",
"def secondsPassed(self)->int:\n return self._lic.params['usedDurationInSeconds'].value",
"def duration(self):\n\n ended = time.time() if self.ended is None else self.ended\n return ended - self.started",
"def elapsed(self):\n if self.start and self.end:\n return (self.end - self.start).total_seconds()\n return None",
"def secondsLeft(self)->int:\n return 0 if self.secondsPassed >= self.secondsTotal else self.secondsTotal - self.secondsPassed",
"def elapsed(self):\n if self.begin and self.end:\n return (self.end - self.begin).total_seconds()\n else:\n raise ValueError"
] | [
"0.73553234",
"0.73396635",
"0.7332249",
"0.7310087",
"0.7274196",
"0.72670037",
"0.7216402",
"0.71862936",
"0.71495765",
"0.71136665",
"0.70829964",
"0.70781296",
"0.7034211",
"0.697659",
"0.69762397",
"0.696461",
"0.69368035",
"0.6934867",
"0.69290966",
"0.68871796",
"0.68822765",
"0.68631464",
"0.6861986",
"0.68568295",
"0.6838469",
"0.68013287",
"0.6798386",
"0.6779388",
"0.6772307",
"0.6765769"
] | 0.8197923 | 0 |
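
The record above pairs a docstring about elapsed time between events with a property that clamps the result to zero at period boundaries. Below is a minimal, self-contained sketch of the same idea for illustration only; the simplified `Event` class and the 720/300-second period lengths are assumptions made for this example, not taken verbatim from the dataset.

```python
# Illustrative sketch only: a simplified linked list of events where elapsed
# time is derived from the game-clock "seconds_remaining" of consecutive events.
class Event:
    def __init__(self, seconds_remaining, period, previous_event=None):
        self.seconds_remaining = seconds_remaining
        self.period = period
        self.previous_event = previous_event

    @property
    def seconds_since_previous_event(self):
        if self.previous_event is None:
            return 0
        # a fresh period starts at 720 s (regulation) or 300 s (overtime);
        # treating those starts as 0 avoids negative elapsed times across periods
        if self.seconds_remaining == 720:
            return 0
        if self.seconds_remaining == 300 and self.period > 4:
            return 0
        return self.previous_event.seconds_remaining - self.seconds_remaining


tip_off = Event(720, period=1)
first_shot = Event(702, period=1, previous_event=tip_off)
print(first_shot.seconds_since_previous_event)  # 18
```
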
returns True if the event takes place after an offensive rebound on the current possession, False otherwise | def is_second_chance_event(self):
event = self.previous_event
if isinstance(event, Rebound) and event.is_real_rebound and event.oreb:
return True
while not (event is None or event.is_possession_ending_event):
if isinstance(event, Rebound) and event.is_real_rebound and event.oreb:
return True
event = event.previous_event
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_possession_ending_event(self):\n pass",
"def is_onhold(self) -> bool:",
"def is_on(self) -> bool:\n return self.event.is_tripped",
"def shooting(self):\r\n return not self.stopped",
"def is_penalty_event(self):\n if hasattr(self, \"fouls_to_give\"):\n team_ids = list(self.current_players.keys())\n offense_team_id = self.get_offense_team_id()\n defense_team_id = (\n team_ids[0] if offense_team_id == team_ids[1] else team_ids[1]\n )\n if self.fouls_to_give[defense_team_id] == 0:\n if isinstance(self, (Foul, FreeThrow, Rebound)):\n # if foul or free throw or rebound on a missed ft\n # check foul event and should return false is foul\n # was shooting foul and team had a foul to give\n if isinstance(self, Foul):\n foul_event = self\n elif isinstance(self, FreeThrow):\n foul_event = self.foul_that_led_to_ft\n else:\n # if rebound is on missed ft, also need to look at foul that led to FT\n if not self.oreb and isinstance(self.missed_shot, FreeThrow):\n foul_event = self.missed_shot.foul_that_led_to_ft\n else:\n return True\n if foul_event is None:\n return True\n fouls_to_give_prior_to_foul = (\n foul_event.previous_event.fouls_to_give[defense_team_id]\n )\n if fouls_to_give_prior_to_foul > 0:\n return False\n return True\n return False",
"def get_is_on(self, event: Event | None) -> bool:\n if event is None:\n return False\n\n now = dt_util.utcnow()\n value = now > event.start\n if value and event.end is not None and now > event.end:\n value = False\n\n return value",
"def isOn(self):\r\n return len(self.__agenda)>2",
"def has_happened(self):\n\n return self.end < timezone.now()",
"def is_on(self):\n return self._coordinator.get_event_timestamp(self._event_name) > 0",
"def event_processing_finished(self):\n if self.status in ACTIVE_STATES:\n return False # tally of events is only available at end of run\n try:\n event_qs = self.get_event_queryset()\n except NotImplementedError:\n return True # Model without events, such as WFJT\n return self.emitted_events == event_qs.count()",
"def after(self):\n return self.marker_seen and not self.throwaway",
"def is_on(self):\n return self._cur != -1",
"def guard_occupy_transition(self):\n if not self.get_free_positions:\n return True",
"def is_done(self):\n return True if self.t >= self.max_ep_len else False",
"def event_check(self):\r\n if len(self.event_queue) > 0:\r\n event = self.event_queue.pop(0) # oldest\r\n self.event_queue_proc(event)\r\n return True\r\n return False",
"def verify_ending(self):\n self._fast_forward_to_penultimate_play()\n if self.game_status.game_over:\n # Game shouldn't be over quite yet!\n self.reset()\n return False\n\n self.apply_next_event()\n game_over = self.game_status.game_over\n excess_outs = self.game_status.excess_outs\n self.reset()\n return game_over and not excess_outs",
"def count_as_possession(self):\n if self.is_possession_ending_event:\n if self.seconds_remaining > 2:\n return True\n # check when previous possession ended\n prev_event = self.previous_event\n while prev_event is not None and not prev_event.is_possession_ending_event:\n prev_event = prev_event.previous_event\n if prev_event is None or prev_event.seconds_remaining > 2:\n return True\n # possession starts in final 2 seconds\n # return True if there is a FT or FGM between now and end of period\n next_event = prev_event.next_event\n while next_event is not None:\n if isinstance(next_event, FreeThrow) or (\n isinstance(next_event, FieldGoal) and next_event.is_made\n ):\n return True\n next_event = next_event.next_event\n return False",
"def _has_arrived(self, context) -> bool:\n return self._target[0] == context.x and self._target[1] == context.y",
"def is_call_ended(self) -> bool:",
"def poll(cls, context):\r\n return context.object.animation_data.action is not None",
"def _server_poll_expcompleted_(self):\n #print \"class Princeton_CCD function _server_poll_expcompleted_\" \n try:\n last_state = self.polled_running\n except (AttributeError,UnboundLocalError):\n self.polled_running = False\n last_state = False\n self.polled_running = self.query_running()\n if (not bool(last_state) and bool(self.polled_running)):\n self.begin_acq_time = time.time()\n #print self.query_running(), last_state\n #if ((last_state == True) and (self.polled_running == False)): CP\n if (bool(last_state) and not bool(self.polled_running)):\n self.end_acq_time = time.time()\n return True\n else:\n return False",
"def event_trigger(self, event):\n return False",
"def event_trigger(self, event):\n return False",
"def is_happening(self):\n now = timezone.now()\n start = self.start\n end = self.end\n happening = False\n # check that the event has started and 'now' is btwn start & end:\n if (now >= start) and (start.time() <= now.time() <= end.time()):\n happening = True\n return happening",
"def guard_liberate_transition(self):\n if self.get_free_positions:\n return True",
"def enemyCaptured(self):\n return self.game.team.flag.carrier != None",
"def should_poll(self):\r\n return False",
"def in_cooldown(self) -> bool:\n return self.cooldown_counter > 0",
"def touching(self):\n if self._touching != 0:\n return True\n return False",
"def is_triggered(self):\n return(self.order_master.limit_reached or self.order_master.stop_reached)"
] | [
"0.70515865",
"0.6717488",
"0.6612516",
"0.6464781",
"0.64172095",
"0.63652587",
"0.6318331",
"0.6302998",
"0.62632906",
"0.62373704",
"0.6230576",
"0.622687",
"0.62076503",
"0.6200433",
"0.61976546",
"0.6172198",
"0.6165521",
"0.61521184",
"0.6148918",
"0.6104865",
"0.6082942",
"0.60811913",
"0.60811913",
"0.6078108",
"0.60575515",
"0.6051083",
"0.60443544",
"0.6027035",
"0.60077494",
"0.60042524"
] | 0.69695365 | 1 |
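
For the second-chance query above, the document walks the `previous_event` chain back toward the start of the possession looking for an offensive rebound. The stand-alone sketch below shows that backward walk under simplified assumptions; the toy `Event`/`Rebound` classes are invented here purely for illustration and are not the dataset's actual classes.

```python
# Illustrative sketch: walk backwards through events on the current possession
# and report whether an offensive rebound (a "second chance") occurred.
class Event:
    def __init__(self, previous_event=None, is_possession_ending_event=False):
        self.previous_event = previous_event
        self.is_possession_ending_event = is_possession_ending_event


class Rebound(Event):
    def __init__(self, oreb, **kwargs):
        super().__init__(**kwargs)
        self.oreb = oreb              # True for an offensive rebound
        self.is_real_rebound = True


def is_second_chance_event(event):
    cursor = event.previous_event
    while cursor is not None and not cursor.is_possession_ending_event:
        if isinstance(cursor, Rebound) and cursor.is_real_rebound and cursor.oreb:
            return True
        cursor = cursor.previous_event
    return False


missed_shot = Event()
oreb = Rebound(oreb=True, previous_event=missed_shot)
putback = Event(previous_event=oreb)
print(is_second_chance_event(putback))  # True
```
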
returns True if the team on offense is in the penalty, False otherwise | def is_penalty_event(self):
if hasattr(self, "fouls_to_give"):
team_ids = list(self.current_players.keys())
offense_team_id = self.get_offense_team_id()
defense_team_id = (
team_ids[0] if offense_team_id == team_ids[1] else team_ids[1]
)
if self.fouls_to_give[defense_team_id] == 0:
if isinstance(self, (Foul, FreeThrow, Rebound)):
# if foul or free throw or rebound on a missed ft
# check foul event and should return false is foul
# was shooting foul and team had a foul to give
if isinstance(self, Foul):
foul_event = self
elif isinstance(self, FreeThrow):
foul_event = self.foul_that_led_to_ft
else:
# if rebound is on missed ft, also need to look at foul that led to FT
if not self.oreb and isinstance(self.missed_shot, FreeThrow):
foul_event = self.missed_shot.foul_that_led_to_ft
else:
return True
if foul_event is None:
return True
fouls_to_give_prior_to_foul = (
foul_event.previous_event.fouls_to_give[defense_team_id]
)
if fouls_to_give_prior_to_foul > 0:
return False
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def winningTeamPenalty(r):\n \n #Check if home or away had more goals at the 'event' time\n homecheck = int(r['about.goals.home'] > r['about.goals.away'])\n awaycheck = int(r['about.goals.away'] > r['about.goals.home'])\n \n #If home had more goals and the penalty was on the home team, set to 1\n if (homecheck > 0) and (r['against.homeTeam'] == 1):\n return 1\n #If away had more and the penalty was not on home team, set to 1\n if (awaycheck > 0) and (r['against.homeTeam'] == 0):\n return 1\n #Any other situation should be a zero in this column\n else:\n return 0",
"def isFairForTeam(teamNumber):\r\n debug.write(\"Testing if experience gain is fair for player teams\", 2)\r\n if not 1 < int(teamNumber) < 4:\r\n debug.write(\"Incorrect teamnumber passed, teams are not fair\", 2)\r\n return False \r\n return bool( es.getlivingplayercount( 5 - int( teamNumber )))",
"def __bonusExists(self, tgtSuit, hp=1):\n tgtPos = self.activeSuits.index(tgtSuit)\n if hp:\n bonusLen = len(self.hpBonuses[tgtPos])\n else:\n bonusLen = len(self.kbBonuses[tgtPos])\n if bonusLen > 0:\n return 1\n return 0",
"def isInGoal(self):\n coordx= self.playerPos.x\n coordy= self.playerPos.y\n target = 0 if self.id_team == 1 else 1\n\n if((((target == 0)and (coordx<=5))|\n ((target == 1) and(coordx>145))) \n and (coordy<=50 and coordy>=40)):\n return True\n else:\n return False",
"def is_best_response(self, own_action, opponents_actions, tol=None):\n if tol is None:\n tol = self.tol\n\n payoff_vector = self.payoff_vector(opponents_actions)\n payoff_max = payoff_vector.max()\n\n if isinstance(own_action, numbers.Integral):\n return payoff_vector[own_action] >= payoff_max - tol\n else:\n return np.dot(own_action, payoff_vector) >= payoff_max - tol",
"def likely_to_be_offered(self):\n if self.score >= 5:\n return True\n return False",
"def is_won(self):\n return self.position == self.proposition.outcome and self.proposition.is_paid",
"def check_enemy_fleet(self):\n if len(self.enemyShips) > 0:\n response = False\n for ship in self.enemyShips:\n if ship.afloat == True:\n response = True\n return response",
"def inCamp(self):\n return (((self.myTeam==1) and (self.ballPos.x <= self.width/2))\n | ((self.myTeam==2) and (self.ballPos.x >= self.width/2)))",
"def engageEnemyRobots(self, targetRobot):\n # self.log(\"engaging enemys\")\n enemyEngaged = False\n if SPECS.UNITS[self.me.unit].ATTACK_RADIUS[0] <= targetRobot['distance'] <= SPECS.UNITS[self.me.unit].ATTACK_RADIUS[1]: \n enemyEngaged = True\n return enemyEngaged",
"def game_on(self):\n doc = self.documentation\n return (self.draw.accepted or doc[len(doc)-1].accepted) and (self.board.stones_set < self.board.max_nr_stones) and (self.board.score[opponent(self.draw.player)] > 0)",
"def check_score(pl1, pl2, game_on):\n for ply in pl1, pl2:\n if ply.score >= 30:\n game_on = False \n print \"Player \", ply.name, \" won the game with \", ply.score, \" points.\"\n return game_on",
"def is_spare(self):\n if self.is_strike():\n return False\n\n return (self.first_ball + self.second_ball) == 10",
"def goalReached(self, rewards):\n return len(rewards) >= 100 and np.mean(rewards[-100:]) >= 18",
"def current_involvement(doing, eid):\n if eid in doing: return True\n for dn in active_target_of(doing, eid):\n return True\n return False",
"def penalty_reward(reward):\n if reward < 0:\n return True\n return False",
"def game_won(self):\n return all((foundation.is_full() for foundation in self.foundations.values()))",
"def is_eligible(self) -> Optional[bool]:\n return pulumi.get(self, \"is_eligible\")",
"def should_ask_if_examiner_want_to_give_another_chance(self):\n if self.assignment.is_electronic:\n return (self.delivery_status == \"corrected\" and not self.feedback.is_passing_grade) \\\n or self.delivery_status == 'closed-without-feedback'\n else:\n return False",
"def check_if_won(self):\n if self.player_points > self.enemy_points:\n self.bHasWon = True\n else:\n self.bHasWon = False",
"def game_is_tied(self):\n tie_score = False\n if self.my_score == self.opponent_score:\n tie_score = True\n my_moves = self.steps_available(self.loc)\n opponent_moves = self.steps_available(self.opponent_loc)\n if my_moves == 0 and opponent_moves == 0 and tie_score:\n return True\n else:\n penalty = self.penalty_score\n if my_moves == 0 and opponent_moves != 0:\n return (self.my_score - penalty) == self.opponent_score\n elif my_moves != 0 and opponent_moves == 0:\n return self.my_score == (self.opponent_score - penalty)\n else:\n return False",
"def enemyCaptured(self):\n return self.game.team.flag.carrier != None",
"def experience_match(volunteer, volunteers, slot):\n if not volunteer.first_time:\n return True\n elif slot.type != 'coach2':\n return True #Brittle alert. Uses knowledge of the task: that this only matters currently for persons at coach 2\n else:\n for volunteer2 in volunteers:\n for a_slot in volunteer2.assigned_slots:\n if a_slot.day == slot.day and a_slot.time_period == slot.time_period:\n #This should only match for coach 2 slots with same day and time period\n if volunteer2.first_time:\n #print(\"Not matching {} {} and {} {} for slot {} {} {}\".format(volunteer.first_name,\n # volunteer.last_name, volunteer2.first_name, volunteer2.last_name,\n # slot.day, slot.time_period, slot.type))\n return False\n #else:\n # print(\"OK to match {} {} and {} {} for slot {} {}\".format(volunteer.first_name,\n # volunteer.last_name, volunteer2.first_name, volunteer2.last_name,\n # slot.day, slot.time_period))\n return True",
"def passive(self, friendly_team, opposing_team, target):\n stat_buffs = 0\n for position in self.UNCOLLECTED_SOULS:\n if position > 0 and not opposing_team['team'][position] == None and opposing_team['team'][position].current_hp <= 0:\n stat_buffs += 2\n position -= 4\n stat_bonus = {\n 'armor': stat_buffs,\n 'resistance': stat_buffs,\n }\n self.set_stats(self.get_effective_stats(stat_bonus))",
"def is_enemy(self) -> bool:\n return self.proto.alliance == ALLIANCE.Enemy.value",
"def calcul_risk(self):\n if (self.take_profit - self.buy_price) >= (\n self.buy_price - self.stop_loss\n ) * self.risk:\n return True\n else:\n return False",
"def eligible(CGPA:float, Year:int, program:str) -> bool:\n return CGPA >= 2 and Year == (2 or 3) and program == \"CS\"",
"def __have_team(self, squad_id):\n return not self.data_engine[squad_id][\"team_criteria\"]",
"def canItakeEnemyShip(self, enemyShip):\n if self.assaultStrength/enemyShip.getPersonStrength() > 1.5:\n return 1\n return 0",
"def __checkPropBonus(self, track):\n result = False\n if self.battle.getInteractivePropTrackBonus() == track:\n result = True\n return result"
] | [
"0.6627246",
"0.6298236",
"0.6159958",
"0.6144886",
"0.61247426",
"0.6035521",
"0.6007446",
"0.5965502",
"0.5947476",
"0.59469855",
"0.5929833",
"0.5910404",
"0.5885881",
"0.58652747",
"0.5852705",
"0.5821209",
"0.5802464",
"0.58004534",
"0.5798505",
"0.57926786",
"0.5742253",
"0.572092",
"0.5705923",
"0.570519",
"0.5703815",
"0.5699349",
"0.5686902",
"0.5676525",
"0.5676202",
"0.5669642"
] | 0.70770013 | 0 |
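
The penalty-situation document above hinges on a `fouls_to_give` counter keyed by team id. The following stand-alone sketch shows just that bookkeeping step — checking whether the defending team has exhausted its fouls to give; the team ids and counter values are made up for the example and the shooting-foul exception handled in the full document is deliberately omitted.

```python
# Illustrative sketch: decide whether the defense is "in the penalty", i.e.
# has no fouls left to give, for a simplified event snapshot.
def is_penalty_event(fouls_to_give, offense_team_id):
    """fouls_to_give maps team_id -> remaining fouls before the bonus."""
    team_ids = list(fouls_to_give.keys())
    defense_team_id = team_ids[0] if offense_team_id == team_ids[1] else team_ids[1]
    return fouls_to_give[defense_team_id] == 0


fouls_to_give = {1610612737: 2, 1610612738: 0}   # hypothetical team ids
print(is_penalty_event(fouls_to_give, offense_team_id=1610612737))  # True
print(is_penalty_event(fouls_to_give, offense_team_id=1610612738))  # False
```
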
returns True if the event is a possession-changing event that should count as a real possession, False otherwise. In order to not include possessions which have a very low probability of scoring in possession counts, a possession won't be counted as a possession if it starts with <= 2 seconds left and no points are scored before the period ends | def count_as_possession(self):
if self.is_possession_ending_event:
if self.seconds_remaining > 2:
return True
# check when previous possession ended
prev_event = self.previous_event
while prev_event is not None and not prev_event.is_possession_ending_event:
prev_event = prev_event.previous_event
if prev_event is None or prev_event.seconds_remaining > 2:
return True
# possession starts in final 2 seconds
# return True if there is a FT or FGM between now and end of period
next_event = prev_event.next_event
while next_event is not None:
if isinstance(next_event, FreeThrow) or (
isinstance(next_event, FieldGoal) and next_event.is_made
):
return True
next_event = next_event.next_event
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_possession_ending_event(self):\n pass",
"def significant_position_change(self, timestamp, new_position):\n timediff = (timestamp - self.timestamp).total_seconds()\n posdiff = (new_position - self.position) / 1000\n diffdiff = timediff - posdiff\n\n if abs(diffdiff) > 5:\n return True\n return False",
"def calc_is_new_position(self, game_state: dict):\n current_position = game_state['self'][3]\n if current_position in self.positions:\n return False\n else:\n return True",
"def is_use_qps(self) -> bool:\n if self.qps > 0 and self.second > 0:\n return True\n else:\n return False",
"def is_second_chance_event(self):\n event = self.previous_event\n if isinstance(event, Rebound) and event.is_real_rebound and event.oreb:\n return True\n while not (event is None or event.is_possession_ending_event):\n if isinstance(event, Rebound) and event.is_real_rebound and event.oreb:\n return True\n event = event.previous_event\n return False",
"def is_happening(self):\n now = timezone.now()\n start = self.start\n end = self.end\n happening = False\n # check that the event has started and 'now' is btwn start & end:\n if (now >= start) and (start.time() <= now.time() <= end.time()):\n happening = True\n return happening",
"def important_event(time: int) -> bool:\n last_event = get_events(True)[0]\n try:\n time_event = int(last_event.split('\\n')[0].strip(\"'\"))\n except ValueError:\n time_event = int(last_event.split('\\n')[-1].strip(\"'\"))\n if time - time_event < 60:\n return 'gol' in last_event or 'cartão' in last_event\n return False",
"def get_is_on(self, event: Event | None) -> bool:\n if event is None:\n return False\n\n now = dt_util.utcnow()\n value = now > event.start\n if value and event.end is not None and now > event.end:\n value = False\n\n return value",
"def has_position_improved(self, previous_matchday_standing):\n return \\\n self.position < previous_matchday_standing.position and \\\n self.played_games > previous_matchday_standing.played_games",
"def is_session_valid(self, logonTimestamp):\n time_diff = time.time() - logonTimestamp\n return (time_diff / 60) < self.session_time_limit",
"def has_happened(self):\n\n return self.end < timezone.now()",
"def has_position_changed(self, previous_matchday_standing):\n return \\\n self.position != previous_matchday_standing.position and \\\n self.played_games > previous_matchday_standing.played_games",
"def calc_position_change(self, game_state: dict):\n current_position = game_state['self'][3]\n # print(f'Current Position: {current_position}')\n while len(self.positions) > 3:\n self.positions.pop(0)\n\n if current_position in self.positions:\n return True\n else:\n return False",
"def verify_ending(self):\n self._fast_forward_to_penultimate_play()\n if self.game_status.game_over:\n # Game shouldn't be over quite yet!\n self.reset()\n return False\n\n self.apply_next_event()\n game_over = self.game_status.game_over\n excess_outs = self.game_status.excess_outs\n self.reset()\n return game_over and not excess_outs",
"def _opponent_defected_in_first_n_rounds(\n opponent: Player, first_n_rounds: int\n) -> bool:\n return D in opponent.history[:first_n_rounds]",
"def check_session(col_user, session_id, timestamp, elapse_limit=60):\n the_user = col_user.find_one({\"session_key.session_id\": session_id})\n if not the_user:\n return False\n\n the_timestamp = the_user[\"session_key\"].get(\"timestamp\")\n current_timestamp = convert_to_bson_timestamp(timestamp)\n\n elapsed = current_timestamp.time - the_timestamp.time\n if elapsed >= elapse_limit:\n return False\n return the_user",
"def double_score_started(self):\n if self.double_score_start is False:\n return False\n else:\n return True",
"def gameOver():\n if len(p1)==0 and len(p1winnings)==0:\n return True\n elif len(p2)==0 and len(p2winnings)==0:\n return True\n return False",
"def is_soon(dt, window):\r\n soon = (utcnow() + datetime.timedelta(seconds=window))\r\n return normalize_time(dt) <= soon",
"def verify_event_timing(self, event, item):\n return True",
"def is_triggered(self, curr_time: pd.Timestamp, state) -> bool:\n\n if self._n_passed >= len(self._schedule):\n return False\n else:\n next_trigger_time = self._schedule[self._n_passed]\n\n if next_trigger_time > curr_time:\n return False\n else:\n self._n_passed = sum([t <= curr_time for t in self._schedule])\n return True",
"def EndCriteria(Vote):\n if (time.time() - Vote['VoteInfo']['timeout']) < Vote['VoteInfo']['StartTime']:\n return True\n if Vote['TotalVotes'] == len(livingPlayers)-2:\n return True",
"def manage_position(self, dt, pos, logic_df):\n if pos.almost_expired_ratio(dt) > 0:\n pos.close(dt)",
"def is_penalty_event(self):\n if hasattr(self, \"fouls_to_give\"):\n team_ids = list(self.current_players.keys())\n offense_team_id = self.get_offense_team_id()\n defense_team_id = (\n team_ids[0] if offense_team_id == team_ids[1] else team_ids[1]\n )\n if self.fouls_to_give[defense_team_id] == 0:\n if isinstance(self, (Foul, FreeThrow, Rebound)):\n # if foul or free throw or rebound on a missed ft\n # check foul event and should return false is foul\n # was shooting foul and team had a foul to give\n if isinstance(self, Foul):\n foul_event = self\n elif isinstance(self, FreeThrow):\n foul_event = self.foul_that_led_to_ft\n else:\n # if rebound is on missed ft, also need to look at foul that led to FT\n if not self.oreb and isinstance(self.missed_shot, FreeThrow):\n foul_event = self.missed_shot.foul_that_led_to_ft\n else:\n return True\n if foul_event is None:\n return True\n fouls_to_give_prior_to_foul = (\n foul_event.previous_event.fouls_to_give[defense_team_id]\n )\n if fouls_to_give_prior_to_foul > 0:\n return False\n return True\n return False",
"def _is_paused(self):\n self.paused = self.state == 0\n return self.paused",
"def is_soon(dt, window):\n soon = (utcnow() + datetime.timedelta(seconds=window))\n return normalize_time(dt) <= soon",
"def stats_change(self):\n return True if self.board.prev_state != self.board.shot_count else False",
"def is_paused(self):\n\t\treturn self.pause",
"def game_over(self):\n return self.lives() < 0",
"def repetition_happened(self):\n repetition = False\n if len(self.moves) >= 12:\n if self.moves[-1][0] == self.moves[-5][0] == self.moves[-9][0] and \\\n self.moves[-1][1] == self.moves[-5][1] == self.moves[-9][1] and \\\n self.moves[-2][0] == self.moves[-6][0] == self.moves[-10][0] and \\\n self.moves[-2][1] == self.moves[-6][1] == self.moves[-10][1] and \\\n self.moves[-3][0] == self.moves[-7][0] == self.moves[-11][0] and \\\n self.moves[-3][1] == self.moves[-7][1] == self.moves[-11][1] and \\\n self.moves[-4][0] == self.moves[-8][0] == self.moves[-12][0] and \\\n self.moves[-4][1] == self.moves[-8][1] == self.moves[-12][1]:\n repetition = True\n\n return repetition"
] | [
"0.7091942",
"0.60511667",
"0.58913386",
"0.5813963",
"0.5769117",
"0.5687262",
"0.5611369",
"0.56004244",
"0.55632025",
"0.5527948",
"0.5527946",
"0.55076903",
"0.54790807",
"0.5442499",
"0.54219216",
"0.5415135",
"0.5407619",
"0.5396577",
"0.5360468",
"0.53576845",
"0.53544474",
"0.5350832",
"0.5345729",
"0.5343773",
"0.53251016",
"0.53158414",
"0.53155833",
"0.53153086",
"0.53008413",
"0.5275285"
] | 0.82088506 | 0 |
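
The count_as_possession document above only discards a possession that both starts inside the final two seconds and produces no free throw or made field goal before the period ends. Below is a small forward-scan sketch of that last check, with toy event classes standing in for the dataset's; it is an illustration of the idea, not the original implementation.

```python
# Illustrative sketch: scan forward from the start of a late-clock possession
# and keep it only if a free throw or made field goal happens before the buzzer.
class Event:
    def __init__(self, next_event=None):
        self.next_event = next_event


class FreeThrow(Event):
    pass


class FieldGoal(Event):
    def __init__(self, is_made, next_event=None):
        super().__init__(next_event)
        self.is_made = is_made


def scores_before_period_ends(first_event):
    event = first_event
    while event is not None:
        if isinstance(event, FreeThrow) or (isinstance(event, FieldGoal) and event.is_made):
            return True
        event = event.next_event
    return False


buzzer_beater = FieldGoal(is_made=True)
inbound = Event(next_event=buzzer_beater)
print(scores_before_period_ends(inbound))  # True
```
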
download the HR2 from the sqlite DB | def download(self,connector,condition):
c= connector.cursor()
# condition = " WHERE DIF_ID=%d AND NUM=%d" % (difid,num)
snew = buildSelect(self,'HR2',condition)
# print snew
c.execute(snew)
lnames=[]
for name,val in sorted(self.__dict__.iteritems()):
lnames.append(name)
vobj=[]
for row in c:
# print row
hr2=HR2Def(0)
for i in range(len(lnames)):
#print lnames[i],row[i]
hr2.__dict__[lnames[i]]=row[i]
vobj.append(hr2)
return vobj | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def download_hess_dr1_data():\n download_data_files(FILENAMES_HESS_DR1)",
"def download():\n return response.download(request,db)",
"def download():\n return response.download(request,db)",
"def download():\n return response.download(request,db)",
"def download():\n return response.download(request,db)",
"def download():\n return response.download(request,db)",
"def download():\n return response.download(request,db)",
"def download():\n\treturn response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)"
] | [
"0.6795823",
"0.6621874",
"0.6621874",
"0.6621874",
"0.6621874",
"0.6621874",
"0.6621874",
"0.6479302",
"0.62850004",
"0.62850004",
"0.62850004",
"0.62850004",
"0.62850004",
"0.62850004",
"0.62850004",
"0.62850004",
"0.62850004",
"0.62850004",
"0.62850004",
"0.62850004",
"0.62850004",
"0.62850004",
"0.62850004",
"0.62850004",
"0.62850004",
"0.62850004",
"0.62850004",
"0.62850004",
"0.62850004",
"0.62850004"
] | 0.71289873 | 0 |
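
The HR2 download above (and the DCC and SETUP variants that follow) rely on a `buildSelect` helper that is not shown in this section. A plausible reading is that it builds a SELECT whose column order matches the object's attribute names sorted alphabetically, which would explain why the loader also sorts `__dict__` before mapping row values back onto attributes. The helper below is a guess at that contract, written for illustration only; the `HR2Stub` class and its columns are invented.

```python
# Hypothetical reconstruction of a buildSelect-style helper: column order must
# match the sorted attribute names so that row[i] lines up with lnames[i].
def build_select(obj, table, condition=""):
    columns = sorted(obj.__dict__)          # e.g. ["DIF_ID", "NUM", ...]
    sql = "SELECT %s FROM %s" % (", ".join(columns), table)
    if condition:
        sql += " " + condition.strip()      # e.g. "WHERE DIF_ID=1 AND NUM=2"
    return sql


class HR2Stub:
    def __init__(self):
        self.DIF_ID = 0
        self.NUM = 0


print(build_select(HR2Stub(), "HR2", "WHERE DIF_ID=1"))
# SELECT DIF_ID, NUM FROM HR2 WHERE DIF_ID=1
```
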
download the DCC from the sqlite DB | def download(self,connector,condition):
c= connector.cursor()
snew = buildSelect(self,'DCC',condition)
# print snew
c.execute(snew)
lnames=[]
for name,val in sorted(self.__dict__.iteritems()):
lnames.append(name)
vobj=[]
for row in c:
# print row
obj=DCCDef()
for i in range(len(lnames)):
obj.__dict__[lnames[i]]=row[i]
vobj.append(obj)
return vobj | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def download_dbase(ascii_dbase_url, ascii_dbase_root):\n from fiasco import log\n log.debug(f'Downloading database from {ascii_dbase_url}')\n log.debug(f'Downloading database to {ascii_dbase_root}')\n tar_tmp_dir = FIASCO_HOME / 'tmp'\n tar_tmp_dir.mkdir(exist_ok=True, parents=True)\n with set_temp_cache(path=tar_tmp_dir, delete=True):\n tmp_tar = download_file(ascii_dbase_url, cache=True, show_progress=True)\n with tarfile.open(tmp_tar) as tar:\n tar.extractall(path=ascii_dbase_root)",
"def download():\n return response.download(request,db)",
"def download():\n return response.download(request,db)",
"def download():\n return response.download(request,db)",
"def download():\n return response.download(request,db)",
"def download():\n return response.download(request,db)",
"def download():\n return response.download(request,db)",
"def download():\n\treturn response.download(request, db)",
"def download(self,connector,condition):\n c= connector.cursor()\n\n\n snew = buildSelect(self,'DIF',condition)\n# print snew\n c.execute(snew)\n lnames=[]\n for name,val in sorted(self.__dict__.iteritems()):\n lnames.append(name)\n\n vobj=[]\n for row in c:\n# print row\n obj=DIFDef()\n for i in range(len(lnames)):\n obj.__dict__[lnames[i]]=row[i]\n vobj.append(obj)\n\n \n return vobj",
"def download_source():\n \n #if os.path.exists(UNFCC_FILE): \n # os.rename(UNFCC_FILE,'old_'+UNFCC_FILE)\n #if os.path.exists(EBAL_FILE):\n # os.rename(EBAL_FILE,'old_'+EBAL_FILE)\n\n try:\n unsd = sdmx.Request('UNSD')\n sdmx.logger.setLevel(logging.INFO)\n \n logger.info('Loading UNFCC Data')\n resp_unfcc = unsd.data('DF_UNData_UNFCC')\n\n logger.info('Loading UN Energy Balance Data')\n resp_ebal = unsd.data('DF_UNData_EnergyBalance')\n except Exception as e:\n logger.error('Error!! Please look at SDMX logs to troubleshoot' + str(e))\n traceback.print_exc(file = sys.stdout)\n\n try:\n df_ebal = resp_ebal.to_pandas()\n df_unfcc = resp_unfcc.to_pandas()\n\n df_unfcc.reset_index().to_csv(UNFCC_FILE,index=False)\n logger.info('UNFCC Greenhouse Data stored as {}'.format(UNFCC_FILE))\n\n df_ebal.reset_index().to_csv(EBAL_FILE,index=False)\n logger.info('UN Energy Balance Data stored as {}'.format(EBAL_FILE))\n except Exception as e:\n logger.error('Error!! While saving data from SDMX to CSV ' + str(e))\n traceback.print_exc(file = sys.stdout)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)"
] | [
"0.64645934",
"0.63106734",
"0.63106734",
"0.63106734",
"0.63106734",
"0.63106734",
"0.63106734",
"0.61389863",
"0.61100876",
"0.6083434",
"0.59853745",
"0.59853745",
"0.59853745",
"0.59853745",
"0.59853745",
"0.59853745",
"0.59853745",
"0.59853745",
"0.59853745",
"0.59853745",
"0.59853745",
"0.59853745",
"0.59853745",
"0.59853745",
"0.59853745",
"0.59853745",
"0.59853745",
"0.59853745",
"0.59853745",
"0.59853745"
] | 0.66012114 | 0 |
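
A self-contained demo of the row-hydration idiom used by these download methods is sketched below, assuming a plain `sqlite3` connection and an invented table/column layout: sorted attribute names decide both the SELECT column order and the mapping of each returned row back onto a fresh object. It is runnable as-is but does not use the dataset's real `DCCDef` class or schema.

```python
import sqlite3


class RowDef:
    def __init__(self):
        self.ID = 0
        self.NAME = ""


conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE DCC (ID INTEGER, NAME TEXT)")
conn.execute("INSERT INTO DCC VALUES (1, 'dcc-01')")

template = RowDef()
lnames = sorted(template.__dict__)                      # ['ID', 'NAME']
cursor = conn.execute(
    "SELECT %s FROM DCC WHERE ID=?" % ", ".join(lnames), (1,)
)

objects = []
for row in cursor:
    obj = RowDef()
    for i, name in enumerate(lnames):
        obj.__dict__[name] = row[i]   # hydrate attribute from matching column
    objects.append(obj)

print(objects[0].ID, objects[0].NAME)   # 1 dcc-01
```
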
download the SETUP from the sqlite DB | def download(self,connector,condition):
c= connector.cursor()
snew = buildSelect(self,'SETUP',condition)
# print snew
c.execute(snew)
lnames=[]
for name,val in sorted(self.__dict__.iteritems()):
lnames.append(name)
vobj=[]
for row in c:
# print row
obj=SETUPDef()
for i in range(len(lnames)):
obj.__dict__[lnames[i]]=row[i]
vobj.append(obj)
return vobj | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def download_and_prepare(self):\n self._download_and_prepare()",
"def download_and_preprocess(self):\n print('Preparing steering angle database.')\n print('Downloading...')\n self.download()\n print('Preprocessing...')\n self.preprocess()",
"def initial_db_setup() -> None:\n db_filename = \"twdft.db\"\n db_path = os.path.join(TWDFT_DATA_DIR, db_filename)\n csv_filename = \"sites.csv\"\n csv_path = os.path.join(TWDFT_DATA_DIR, csv_filename)\n db_is_new = not os.path.exists(db_path)\n sites_csv = os.path.join(TWDFT_DATA_DIR, csv_filename)\n\n if db_is_new:\n with sqlite3.connect(db_path) as conn:\n c = conn.cursor()\n\n # first we create a site object\n c.execute(\n \"\"\"\n CREATE TABLE site(\n id INTEGER PRIMARY KEY,\n name TEXT,\n site_type TEXT,\n sub_category TEXT,\n address_1 TEXT,\n address_2 TEXT,\n town TEXT,\n county TEXT,\n country TEXT,\n postcode TEXT,\n site_category TEXT,\n freq_target TEXT,\n created TEXT,\n notes TEXT,\n last_inspection TEXT,\n next_inspection TEXT,\n pfsp_approval TEXT,\n pfsp_expiry TEXT,\n unlocode TEXT,\n pfso TEXT,\n pso TEXT,\n pfsa_approval TEXT,\n pfsa_expiry TEXT,\n team TEXT,\n created_by TEXT,\n last_updated TEXT,\n updated_by TEXT,\n afp_loc TEXT,\n rdf TEXT,\n classification TEXT,\n article24 TEXT,\n psa_approval TEXT,\n inspection_due TEXT\n )\n \"\"\"\n )\n conn.commit()\n\n # next we want an inspection table\n\n c.execute(\n \"\"\"\n CREATE TABLE inspection(\n id INTEGER PRIMARY KEY,\n site INTEGER,\n date TEXT,\n status TEXT,\n time TEXT,\n FOREIGN KEY(site) REFERENCES site(id)\n )\n \"\"\"\n )\n conn.commit()\n\n # next we want an inspector table\n c.execute(\n \"\"\"\n create table inspector(\n id integer primary key,\n first_name text,\n last_name text\n )\n \"\"\"\n )\n conn.commit()\n\n for i in INSPECTORS:\n first = i.split(\" \")[0]\n last = i.split(\" \")[1]\n c.execute(\n \"INSERT INTO inspector(first_name, last_name) VALUES (?,?)\",\n (first, last),\n )\n\n # a table that links inspectors with inspections\n c.execute(\n \"\"\"\n CREATE TABLE inspector_inspections(\n inspector INTEGER,\n inspection INTEGER,\n FOREIGN KEY (inspector) REFERENCES inspector(id),\n FOREIGN KEY (inspection) REFERENCES inspection(id)\n )\n \"\"\"\n )\n conn.commit()\n\n for site in map(Site._make, csv.reader(open(csv_path, \"r\"))):\n try:\n c.execute(\n f\"\"\"\n INSERT INTO site VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\"\"\",\n (\n int(site.id.replace(\",\", \"\")),\n site.name,\n site.site_type,\n site.sub_category,\n site.address_1,\n site.address_2,\n site.town,\n site.county,\n site.country,\n site.postcode,\n site.site_category,\n site.freq_target,\n site.created,\n site.notes,\n site.last_inspection,\n site.next_inspection,\n site.pfsp_approval,\n site.pfsp_expiry,\n site.unlocode,\n site.pfso,\n site.pso,\n site.pfsa_approval,\n site.pfsa_expiry,\n site.team,\n site.created_by,\n site.last_updated,\n site.updated_by,\n site.afp_loc,\n site.rdf,\n site.classification,\n site.article24,\n site.psa_approval,\n site.inspection_due,\n ),\n )\n except sqlite3.IntegrityError as e:\n print(\"That hasnae worked\", site.inspection_due)",
"def download():\n return response.download(request,db)",
"def download():\n return response.download(request,db)",
"def download():\n return response.download(request,db)",
"def download():\n return response.download(request,db)",
"def download():\n return response.download(request,db)",
"def download():\n return response.download(request,db)",
"def download_dbase(ascii_dbase_url, ascii_dbase_root):\n from fiasco import log\n log.debug(f'Downloading database from {ascii_dbase_url}')\n log.debug(f'Downloading database to {ascii_dbase_root}')\n tar_tmp_dir = FIASCO_HOME / 'tmp'\n tar_tmp_dir.mkdir(exist_ok=True, parents=True)\n with set_temp_cache(path=tar_tmp_dir, delete=True):\n tmp_tar = download_file(ascii_dbase_url, cache=True, show_progress=True)\n with tarfile.open(tmp_tar) as tar:\n tar.extractall(path=ascii_dbase_root)",
"def _db_setup(self):\n self.get_connection()\n sql_file = open(db_config.DATABASE_TABLES_SETUP_FILE, 'r')\n with self.conn.cursor() as cur:\n cur.execute(sql_file.read())\n self.conn.commit()\n logger.info(f'The script {db_config.DATABASE_TABLES_SETUP_FILE} has run.')",
"def database(request):\n with tempfile.TemporaryDirectory() as tmpdir:\n with zipfile.ZipFile(os.path.join(tmpdir, 'data.zip'), 'x') as datazip:\n datazip.write(settings.DATABASES['default']['NAME'], arcname='db.sqlite3')\n response = FileResponse(open(os.path.join(tmpdir, 'data.zip'), 'rb'))\n response['Content-Disposition'] = ('attachment; filename=\"db.sqlite3.zip\"')\n return response",
"def setup(db_name = 'net.db', **extra_params):\n global db_run # Imports the DB from the simulator\n \n# # If the file already exists delete it\n if DEBUG: print \"[ pyNN ] : Opening DB\", os.path.abspath(db_name)\n if os.path.exists(db_name):\n if DEBUG: print \"[ pyNN ] : DB already initialized... cleaning up... removing file %s\" % db_name\n os.remove(db_name)\n db_run = db(db_name) # Creates the DB \n db_run.init_db() # Initializes the DB\n return(db_run)",
"def download1():\n #t=request.vars.arg(0)\n response.flash=request\n #print request.wsgi.environ['HTTP_REFERER']\n #print 'yghklo=',request.args[0]\n a=db(db.Project.Project_File==request.args[0]).select(db.Project.ALL)\n #a=db(db.Project.id==38).select(db.Project.ALL)\n #if a == None:\n#\t print 'silent'\n # print 'a= aabhas download',a[0].no_of_download, a[0].Project_File\n # if a[0].no_of_download==None:\n#\t a[0].no_download=0\n db(db.Project.Project_File==a[0].Project_File).update(no_of_download=(a[0].no_of_download or 0)+1)\n print 'a.id=',a[0].id\n # print len(a),'\\n'\n #print \"\\n\\n\\n\\n\"\n return response.download(request, db)",
"def db_seeder():\n logger.info(\"SQLite DB seeder initiated\")\n connection = sqlite3.connect(SETTINGS['DBPATH'])\n cursorobj = connection.cursor()\n basetbl = \"urlsbase\"\n try:\n cursorobj.execute('SELECT * FROM %s' %(basetbl))\n logger.info(\"Table \\'%s\\' exists, table verified\", basetbl)\n print('')\n except Exception:\n logger.info(\"Creating table \\'%s\\'\", basetbl)\n cursorobj.execute('CREATE TABLE %s (\\\n id INTEGER PRIMARY KEY AUTOINCREMENT,\\\n url TEXT,\\\n title TEXT DEFAULT \"\",\\\n shrink TEXT,\\\n hits INTEGER DEFAULT 0,\\\n created_at TIMESTAMP,\\\n updated_at TIMESTAMP,\\\n lasthit_at TIMESTAMP)' %(basetbl))\n logger.info(\"Successfully created table \\'%s\\'\", basetbl)\n connection.commit()\n connection.close()\n logger.info(\"Seeder completed\")",
"async def prepare_databases(self):",
"def download():\n\treturn response.download(request, db)",
"def setup(self):\n \n dbpath, config = self._start()\n \n self.logger.msg1(\"Workding directory: \"+dirname(dbpath))\n # test if already exists - build from scratch or not?\n if exists(dbpath):\n if not self.reset: \n self.logger.msg1(\"Skipping database build; database exists\")\n return None, None \n self.logger.msg1(\"Removing existing database\")\n os.remove(dbpath)\n\n # create a new database file\n self.logger.msg1(\"Creating new database: \"+basename(dbpath)) \n setup_db(dbpath, tables=self.tables)\n \n return dbpath, config",
"def prepare(self):\n super(Test200SmartSanityDownload004, self).prepare()\n\n self.logger.info('Preconditions:')\n self.logger.info('1. Open Micro/WINr; ')\n self.logger.info('2. Set up connection with PLC;')\n self.logger.info('3. Create a project which has OB,DB,SDB;')\n self.MicroWIN.test_prepare('ob_db_sdb_01.smart')\n self.PROJECT.project_open('ob_db_sdb_02.smart')",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)"
] | [
"0.62866896",
"0.62310046",
"0.61061794",
"0.60753727",
"0.60753727",
"0.60753727",
"0.60753727",
"0.60753727",
"0.60753727",
"0.6056658",
"0.5950842",
"0.5944346",
"0.5913646",
"0.58318216",
"0.58160055",
"0.57921296",
"0.57838356",
"0.57605076",
"0.57173675",
"0.5682499",
"0.5682499",
"0.5682499",
"0.5682499",
"0.5682499",
"0.5682499",
"0.5682499",
"0.5682499",
"0.5682499",
"0.5682499",
"0.5682499"
] | 0.6631947 | 0 |
Create the descriptor. `base_attr` is the name of an integer attribute that represents binary flags. `bitmask` is the binary value to toggle on `base_attr`. | def __init__(self, base_attr, bitmask):
self.base_attr = base_attr
self.bitmask = bitmask | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __set__(self, obj, enabled):\n\n value = getattr(obj, self.base_attr)\n if enabled:\n value |= self.bitmask\n else:\n value &= ~self.bitmask\n setattr(obj, self.base_attr, value)",
"def _create_inherited_flag_field(property_):\n name_for_methods = join_names(property_['name_for_methods'], 'is', 'inherited')\n return Field(\n 'inherited_flag',\n name_for_methods,\n property_name=property_['name'],\n type_name='bool',\n wrapper_pointer_name=None,\n field_template='primitive',\n size=1,\n default_value='true',\n custom_copy=False,\n custom_compare=False,\n mutable=False,\n getter_method_name=method_name(name_for_methods),\n setter_method_name=method_name(['set', name_for_methods]),\n initial_method_name=method_name(['initial', name_for_methods]),\n computed_style_custom_functions=property_[\"computed_style_custom_functions\"],\n )",
"def flags(cls):\n\n assert cls.__bases__ == (object,)\n\n d = dict(cls.__dict__)\n new_type = type(cls.__name__, (int,), d)\n new_type.__module__ = cls.__module__\n\n map_ = {}\n for key, value in iteritems(d):\n if key.upper() == key and isinstance(value, integer_types):\n value_instance = new_type(value)\n setattr(new_type, key, value_instance)\n map_[value] = key\n\n def str_(self):\n value = int(self)\n matches = []\n for k, v in map_.items():\n if value & k:\n matches.append(\"%s.%s\" % (type(self).__name__, v))\n value &= ~k\n if value != 0 or not matches:\n matches.append(text_type(value))\n\n return \" | \".join(matches)\n\n def repr_(self):\n return \"<%s: %d>\" % (str(self), int(self))\n\n setattr(new_type, \"__repr__\", repr_)\n setattr(new_type, \"__str__\", str_)\n\n return new_type",
"def _create_Flag(classname, defaultname, flags):\n def __init__(self, name=defaultname, attr=None):\n \"\"\"Initialize custom Flag.\"\"\"\n Flag.__init__(self, name, flags, attr)\n\n def __repr__(self):\n \"\"\"Format as python parsable string.\"\"\"\n args = []\n if self.name != defaultname:\n args.append(self.name)\n if self.propertiesstr:\n args.append(self.propertiesstr)\n args = map(repr, args)\n elif self.propertiesstr:\n args.append(\"attr=%r\" % self.propertiesstr)\n return \"%s(%s)\" % (type(self).__name__, \", \".join(args))\n\n globals()[classname] = type(classname, (Flag,), {\n \"__init__\": __init__,\n \"__repr__\": __repr__\n })",
"def set_bitmask(self, value):\r\n self.__bitmask__ = value | 0xFF00",
"def __get__(self, obj, type=None):\n\n return bool(getattr(obj, self.base_attr) & self.bitmask)",
"def fromflags(cls, scope:MjoScope, type:MjoType, dimension:int=0, modifier:MjoModifier=MjoModifier.NONE, invert:MjoInvert=MjoInvert.NONE) -> 'MjoFlags':\n flags = 0\n flags |= ((modifier.value & 0x7))#<< 0)\n flags |= ((invert.value & 0x3) << 3)\n flags |= ((scope.value & 0x7) << 5)\n flags |= ((type.value & 0x7) << 8)\n #FIXME: Stop handling MjoDimension as int entirely??\n flags |= ((int(dimension) & 0x3) << 11)\n return cls(flags)",
"def from_bitmask ( cls, mode, rwx_bits ):\n return cls (\n mode & rwx_bits[0], mode & rwx_bits[1], mode & rwx_bits[2],\n )",
"def __init__(self,\n minBit,\n nBits,\n desc):\n self.minBit = minBit\n self.nBits = nBits\n self.desc = desc",
"def build_active_schema(cls, attr):\n return cls(attr.name, range=(attr.value, attr.value))",
"def build_active_schema(cls, attr):\n return cls(attr.name, values={attr.value})",
"def __init__(self, policy: TypePolicy, bit_offset: int, bit_size: int, value: int, bin: TypeBinName): \n self._children= (\n bit_offset,\n bit_size,\n value,\n _GenericExpr(_ExprOp._AS_EXP_BIT_FLAGS, 0, {_Keys.VALUE_KEY: policy['bit_write_flags']} if policy is not None and 'bit_write_flags' in policy else {_Keys.VALUE_KEY: 0}),\n bin if isinstance(bin, _BaseExpr) else BlobBin(bin)\n )",
"def __new__(cls, value: int):\n new_object = super(CalibrationStatus, cls).__new__(cls, value)\n new_object._value = value # type: ignore[attr-defined]\n new_object._binary = new_object._to_binary() # type: ignore[attr-defined]\n return new_object",
"def get_bitmask(self):\r\n return self.__bitmask__",
"def __init__(self, policy: TypePolicy, byte_size: int, flags: int, bin: TypeBinName): \n self._children= (\n byte_size,\n _GenericExpr(_ExprOp._AS_EXP_BIT_FLAGS, 0, {_Keys.VALUE_KEY: policy['bit_write_flags']} if policy is not None and 'bit_write_flags' in policy else {_Keys.VALUE_KEY: 0}),\n _GenericExpr(_ExprOp._AS_EXP_BIT_FLAGS, 0, {_Keys.VALUE_KEY: flags} if flags is not None else {_Keys.VALUE_KEY: 0}),\n bin if isinstance(bin, _BaseExpr) else BlobBin(bin)\n )",
"def create(self, descriptor_type=None, descriptor_size=None, descriptor_channels=None, threshold=None, nOctaves=None, nOctaveLayers=None, diffusivity=None): # real signature unknown; restored from __doc__\n pass",
"def add_attr(chain, attrs):\n chain.TotBandEnergy = attrs.get(\"TotBandEnergy\")\n if attrs.get(\"climbSet\", False):\n chain.climbSet = True\n chain.climbers = attrs.get(\"climbers\")\n chain.locks = attrs.get(\"locks\")\n return chain",
"def build_active_schema(cls, attr):\n return cls(attr.name, categories={attr.value})",
"def __init__(self, bit_offset: int, bit_size: int, value: bool, bin: TypeBinName): \n self._children= (\n bit_offset,\n bit_size,\n value,\n bin if isinstance(bin, _BaseExpr) else BlobBin(bin)\n )",
"def __init__(self, bit_offset: int, bit_size: int, value: bool, bin: TypeBinName): \n self._children= (\n bit_offset,\n bit_size,\n value,\n bin if isinstance(bin, _BaseExpr) else BlobBin(bin)\n )",
"def __init__(self, name, value, makehex=False):\n\n self.name = name\n self.value = value\n self.makehex = makehex\n\n # An EnumSet this is a member of\n self.eset = None",
"def __init__(self, policy: TypePolicy, byte_offset: int, value: TypeBitValue, bin: TypeBinName): \n self._children= (\n byte_offset,\n value,\n _GenericExpr(_ExprOp._AS_EXP_BIT_FLAGS, 0, {_Keys.VALUE_KEY: policy['bit_write_flags']} if policy is not None and 'bit_write_flags' in policy else {_Keys.VALUE_KEY: 0}),\n bin if isinstance(bin, _BaseExpr) else BlobBin(bin)\n )",
"def __init__(self, boolee):\n\n super(BinaryColor, self).__init__(1)\n self.boolee = bool(boolee)",
"def __init__(self, name, final_type, default, *,\n allow_falsy, allow_indirect=True, mutable=True, visible=True,\n pseudo_setting=False, # v0.3.0b24\n **more_attributes):\n assert not default or isinstance(default, final_type)\n self.name = name # key\n self.final_type = final_type # bool int str logging.Logger ...\n self.default = default\n self.allow_falsy = allow_falsy # is a falsy final val of setting allowed\n self.allow_indirect = allow_indirect and visible # are indirect values allowed\n self.mutable = mutable\n self.visible = visible\n self.pseudo_setting = pseudo_setting # v0.3.0b24\n\n # v0.3.0b25\n self.indirect_default = more_attributes.pop('indirect_default', self.default)\n\n # we need to write fields in repr the same way every time,\n # so even though more_attributes isn't ordered,\n # we need to pick an order & stick to it\n self._user_attrs = sorted(list(more_attributes))\n self.__dict__.update(more_attributes)",
"def __init__(self, field_name, *args, **kwargs):\n items = list()\n for i in xrange(8, 0, -1):\n items.append((field_name + str(i), Bits(maxlen=1, value=0)))\n super(PresenceFlags, self).__init__(*(items+list(args)), **kwargs)",
"def __init__(self, policy: TypePolicy, bit_offset: int, bit_size: int, value: int, action: int, bin: TypeBinName): \n self._children= (\n bit_offset,\n bit_size,\n value,\n _GenericExpr(_ExprOp._AS_EXP_BIT_FLAGS, 0, {_Keys.VALUE_KEY: policy['bit_write_flags']} if policy is not None and 'bit_write_flags' in policy else {_Keys.VALUE_KEY: 0}),\n _GenericExpr(_ExprOp._AS_EXP_BIT_FLAGS, 0, {_Keys.VALUE_KEY: action} if action is not None else {_Keys.VALUE_KEY: 0}),\n bin if isinstance(bin, _BaseExpr) else BlobBin(bin)\n )",
"def __init__(self, policy: TypePolicy, bit_offset: int, bit_size: int, value: int, action: int, bin: TypeBinName): \n self._children= (\n bit_offset,\n bit_size,\n value,\n _GenericExpr(_ExprOp._AS_EXP_BIT_FLAGS, 0, {_Keys.VALUE_KEY: policy['bit_write_flags']} if policy is not None and 'bit_write_flags' in policy else {_Keys.VALUE_KEY: 0}),\n _GenericExpr(_ExprOp._AS_EXP_BIT_FLAGS, 0, {_Keys.VALUE_KEY: action} if action is not None else {_Keys.VALUE_KEY: 0}),\n bin if isinstance(bin, _BaseExpr) else BlobBin(bin)\n )",
"def __init__(self, policy: TypePolicy, bit_offset: int, bit_size: int, value: TypeBitValue, bin: TypeBinName): \n self._children= (\n bit_offset,\n bit_size,\n value,\n _GenericExpr(_ExprOp._AS_EXP_BIT_FLAGS, 0, {_Keys.VALUE_KEY: policy['bit_write_flags']} if policy is not None and 'bit_write_flags' in policy else {_Keys.VALUE_KEY: 0}),\n bin if isinstance(bin, _BaseExpr) else BlobBin(bin)\n )",
"def __init__(self, policy: TypePolicy, bit_offset: int, bit_size: int, value: TypeBitValue, bin: TypeBinName): \n self._children= (\n bit_offset,\n bit_size,\n value,\n _GenericExpr(_ExprOp._AS_EXP_BIT_FLAGS, 0, {_Keys.VALUE_KEY: policy['bit_write_flags']} if policy is not None and 'bit_write_flags' in policy else {_Keys.VALUE_KEY: 0}),\n bin if isinstance(bin, _BaseExpr) else BlobBin(bin)\n )",
"def __init__(self, policy: TypePolicy, bit_offset: int, bit_size: int, value: TypeBitValue, bin: TypeBinName): \n self._children= (\n bit_offset,\n bit_size,\n value,\n _GenericExpr(_ExprOp._AS_EXP_BIT_FLAGS, 0, {_Keys.VALUE_KEY: policy['bit_write_flags']} if policy is not None and 'bit_write_flags' in policy else {_Keys.VALUE_KEY: 0}),\n bin if isinstance(bin, _BaseExpr) else BlobBin(bin)\n )"
] | [
"0.5400316",
"0.5286925",
"0.5209522",
"0.51593536",
"0.51050526",
"0.5062308",
"0.49769795",
"0.49352902",
"0.4898312",
"0.47941947",
"0.4774199",
"0.4736493",
"0.473473",
"0.4696283",
"0.46891314",
"0.46879336",
"0.46860164",
"0.4677896",
"0.46669382",
"0.46669382",
"0.4665192",
"0.46528533",
"0.46453902",
"0.46447098",
"0.4632677",
"0.46179125",
"0.46179125",
"0.46144158",
"0.46144158",
"0.46144158"
] | 0.76175773 | 0 |
Tests the backward pass of the hinge loss function | def test_hinge_loss_backward():
from your_code import HingeLoss
X = np.array([[-1, 2, 1], [-3, 4, 1]])
w = np.array([1, 2, 3])
y = np.array([1, -1])
loss = HingeLoss(regularization=None)
_true = np.array([-1.5, 2, 0.5])
_est = loss.backward(X, w, y)
    print(_est)
    assert np.allclose(_est, _true)
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_backward(net, X, Y, grad, loss, index):\n eps = 1e-7\n backup = X[index]\n X[index] += eps\n A1 = net.forward(X)\n loss1 = net.loss(Y, A1[-1])\n ratio = (loss1 - loss) / eps\n assert np.isclose(grad[index], ratio)\n X[index] = backup",
"def _backward(loss):\n\n loss.backward()",
"def on_iter_backward(self, runner):\n runner.optimizer.zero_grad()\n runner.loss.backward()\n runner.optimizer.step()",
"def backward_G(self):\n self.loss_G.backward()",
"def forward(self,y_out, y_truth): \n result = None\n #########################################################################\n # TODO: #\n # Implement the forward pass and return the output of the BCE loss. #\n #########################################################################\n\n result = -1 * (np.multiply(y_truth, np.log(y_out)) + np.multiply((1 - y_truth), np.log(1 - y_out)))\n \n \n #########################################################################\n # END OF YOUR CODE #\n #########################################################################\n \n return result",
"def test_relu_back(x, y):\n if x > 0:\n assert op.relu_back(x, y) == y\n else:\n assert op.relu_back(x, y) == 0.0",
"def backward_pass(self, loss):\n\n self.optimizer.zero_grad()\n self.optimizer.backward(loss)\n self.optimizer.step()",
"def forward(self,y_out, y_truth): \n result = (np.square(np.subtract(y_out, y_truth)))\n #########################################################################\n # TODO: #\n # Implement the forward pass and return the output of the MSE loss. #\n #########################################################################\n\n #########################################################################\n # END OF YOUR CODE #\n #########################################################################\n \n return result",
"def backward_pass(self, grad):\n pass",
"def backward(self, input_train, input_train_label):\n batchSize = len(input_train) #liczba obrazow podawanych na wejscie w trakcie jednej iteracji\n weights = self.Weights\n biases = self.Biases\n delta_W = self.delta_W\n delta_B = self.delta_B\n poolParams = self.poolParams\n dW_list = []\n dB_list = []\n dW4 = np.zeros(weights[4].shape)\n dB4 = np.zeros(biases[4].shape)\n dW3 = np.zeros(weights[3].shape)\n dB3 = np.zeros(biases[3].shape)\n dW2 = np.zeros(weights[2].shape)\n dB2 = np.zeros(biases[2].shape)\n dW1 = np.zeros(weights[1].shape)\n dB1 = np.zeros(biases[1].shape)\n dW0 = np.zeros(weights[0].shape)\n dB0 = np.zeros(biases[0].shape)\n loss = 0\n for image in range(batchSize):\n\n X_data = input_train[image]\n X_label = input_train_label[image]\n output_forward, cache = self.forward(X_data) \n loss += -1*sum(X_label - np.log(output_forward)) #obliczenie wartosci funkcji straty [cross entropy]\n\n #Propagacja wsteczna gradientu\n dy = -1*(X_label - output_forward)/2\n #print(\"X_label = {} \\t layer7 = {} \\t dy = {}\".format(X_label, output_forward, dy))\n\n [dy, dW, dB ] = fullycon_b(cache[6], np.asarray([dy]).transpose() , weights[4])\n dW4 += dW\n dB4 += dB.flatten() #wektoryzacja macierzy\n dy = act.relu_b(dy.transpose(), cache[6])\n\n [dy, dW, dB ] = fullycon_b(cache[5][:,0], dy, weights[3])\n dW3 += dW\n dB3 += dB.flatten()\n dy = act.relu_b(dy.transpose(), cache[5][:,0]) \n \n [dy, dW, dB ] = convolution_b(cache[4], dy, weights[2])\n dW2 += dW\n dB2 += dB.flatten()\n \n dy = maxpool_b(cache[3], dy)\n dy = act.relu_b(dy, cache[3])\n\n [dy, dW, dB ] = convolution_b(cache[2], dy, weights[1])\n dW1 += dW\n dB1 += dB.flatten()\n \n dy = maxpool_b(cache[1], dy)\n dy = act.relu_b(dy, cache[1]) \n\n [dy, dW, dB ] = convolution_b(np.asarray([cache[0]]), dy, weights[0])\n dW0 += dW\n dB0 += dB.flatten()\n\t\t\t\n dW_list.append(dW4)\n dB_list.append(dB4)\n dW_list.append(dW3)\n dB_list.append(dB3)\n dW_list.append(dW2)\n dB_list.append(dB2)\n dW_list.append(dW1)\n dB_list.append(dB1)\n dW_list.append(dW0)\n dB_list.append(dB0)\n dW_list = dW_list[::-1]\n dB_list = dB_list[::-1]\n \n #Aktualizacja parametrow kazdej z warstw (o ile takie posiada)\n #uczenie z metoda momentum: learning rate = const; alpha = const\n for x in range(len(dW_list)):\n delta_W[x] = alpha*delta_W[x] - eta*dW_list[x]/batchSize\n weights[x] += delta_W[x]\n delta_B[x] = alpha*delta_B[x] - eta*dB_list[x]/batchSize\n biases[x] += delta_B[x]\n #przypisanie nowych wag po aktualiacji wszystkich parametrow\n self.Weights = weights\n self.Biases = biases\n\n #zwrocenie stosunku wartosci f-cji straty do rozmiaru batch'u\n return loss/batchSize",
"def test_gradient_descent_blobs():\n features, _, targets, _ = load_data('blobs')\n\n hinge = make_predictions(features, targets, 'hinge', None)\n # assert np.all(hinge == targets)\n\n # l1_hinge = make_predictions(features, targets, 'hinge', 'l1')\n # # assert np.all(l1_hinge == targets)\n #\n # l2_hinge = make_predictions(features, targets, 'hinge', 'l2')\n # # assert np.all(l2_hinge == targets)\n #\n # squared = make_predictions(features, targets, 'squared', None)\n # # assert np.all(squared == targets)\n #\n # l1_squared = make_predictions(features, targets, 'squared', 'l1')\n #\n # l2_squared = make_predictions(features, targets, 'squared', 'l2')",
"def backward(model, xs, h1s, h2s, errs):\n # errs is the gradients of output layer for the minibatch\n # dW4 = (np.dot(h3s.T, errs))/xs.shape[0]\n\n # # Get gradient of hidden layer\n # dh_3 = np.dot(errs, model['W4'].T)\n # dh_3[h3s <= 0] = 0\n \n # # The bias \"neuron\" is the constant 1, we don't need to backpropagate its gradient\n # # since it has no inputs, so we just remove its column from the gradient\n # dh_3 = dh_3[:, :-1]\n \n # Gradient for weights H1 -> H2\n dW3 = (np.dot(h2s.T, errs)) / xs.shape[0]\n\n dh_2 = np.dot(errs, model['W3'].T)\n dh_2[h2s <= 0] = 0\n \n # The bias \"neuron\" is the constant 1, we don't need to backpropagate its gradient\n # since it has no inputs, so we just remove its column from the gradient\n dh_2 = dh_2[:, :-1]\n \n # Gradient for weights H1 -> H2\n dW2 = (np.dot(h1s.T, dh_2)) / xs.shape[0]\n \n # Gradient of h1\n dh_1 = np.dot(dh_2, model['W2'].T)\n dh_1[h1s <= 0] = 0\n\n # Again, drop the bias column\n dh_1 = dh_1[:, :-1]\n \n # Add the 1 to the data, to compute the gradient of W1\n ones = np.ones((xs.shape[0], 1))\n xs = np.hstack([xs, ones])\n\n dW1 = (np.dot(xs.T, dh_1))/xs.shape[0]\n\n return dict(W1=dW1, W2=dW2, W3=dW3)",
"def test_forward_backward(self):\n f = forward(self.obs, self.S, self.A, self.E)\n b = backward(self.obs, self.S, self.A, self.E)\n fp = logsumexp(f[:, -1])\n emission = precompute_emission(np.log(self.E))[tuple(self.obs[0])]\n bp = logsumexp(np.log(self.S) + emission + b[:, 0])\n assert_allclose(fp, bp)",
"def lossFun(review, target, hprev):\n xs, hs, ys, ps = {}, {}, {}, {}\n hs[-1] = np.copy(hprev)\n loss = 0\n\n # forward pass\n for t in range(len(review)):\n xs[t] = np.zeros((vector_len,1)) # encode in 1-of-k representation\n for j in range(32):\n xs[t][j] = review[t][j]\n hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh) # hidden state\n\n #Many 2 one\n last = len(review) - 1\n ys = np.dot(Why, hs[last]) + by # unnormalized log probabilities for next chars\n ps = np.exp(ys) / np.sum(np.exp(ys)) # probabilities for next chars\n loss = -np.log(ps[target,0]) # softmax (cross-entropy loss)\n\n # backward pass: compute gradients going backwards\n dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)\n dbh, dby = np.zeros_like(bh), np.zeros_like(by)\n dhnext = np.zeros_like(hs[0])\n\n dy = np.subtract(ps,target) # backprop into y. see http://cs231n.github.io/neural-networks-case-study/#grad if confused here\n dWhy += np.dot(dy, hs[last].T)\n dby += dy\n dh = np.dot(Why.T, dy) + dhnext # backprop into h\n for t in reversed(range(len(review))):\n dhraw = (1 - (hs[t] * hs[t].T)) * dh # backprop through tanh nonlinearity\n dbh += dhraw\n dWxh += np.dot(dhraw, xs[t].T)\n dWhh += np.dot(dhraw, hs[t-1].T)\n dhnext = np.dot(Whh.T, dhraw)\n for dparam in [dWxh, dWhh, dWhy, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients\n return loss, dWxh, dWhh, dWhy, dbh, dby, hs[last]",
"def backward_and_step(self, loss):\n self.opt.zero_grad()\n loss.backward()\n self.opt.step()",
"def backward(self, loss):\n global_timer.my_timer.start_profile(\"BWD\")\n mgr = PatrickStarManager()\n mgr.set_training_stage(TrainingStage.BWD)\n\n for param_fp16 in self.client.chunk_based_param_fp16:\n param_fp16.ps_attr.bwd_used_cnt = 0\n\n self.optimizer.zero_grad()\n if self.loss_scaler:\n self.loss_scaler.backward(loss)\n else:\n loss.backward()\n mgr.update_margin_mem()\n global_timer.my_timer.finish_profile(\"BWD\")",
"def backward_D(self):\n self.loss_D.backward()",
"def backward_G(self):\n # Calculate regularzation loss to make transformed feature and target image feature in the same latent space\n self.loss_reg_gen = self.loss_reg * self.opt.lambda_regularization\n\n # Calculate l1 loss \n loss_app_gen = self.L1loss(self.img_gen, self.input_P2)\n self.loss_app_gen = loss_app_gen * self.opt.lambda_rec \n \n # parsing loss\n label_P2 = self.label_P2.squeeze(1).long()\n #print(self.input_SPL2.min(), self.input_SPL2.max(), self.parsav.min(), self.parsav.max())\n self.loss_par = self.parLoss(self.parsav,label_P2)# * 20. \n self.loss_par1 = self.L1loss(self.parsav, self.input_SPL2) * 100 \n\n # Calculate GAN loss\n base_function._freeze(self.net_D)\n D_fake = self.net_D(self.img_gen)\n self.loss_ad_gen = self.GANloss(D_fake, True, False) * self.opt.lambda_g\n\n # Calculate perceptual loss\n loss_content_gen, loss_style_gen = self.Vggloss(self.img_gen, self.input_P2) \n self.loss_style_gen = loss_style_gen*self.opt.lambda_style\n self.loss_content_gen = loss_content_gen*self.opt.lambda_content\n\n total_loss = 0\n\n for name in self.loss_names:\n if name != 'dis_img_gen':\n #print(getattr(self, \"loss_\" + name))\n total_loss += getattr(self, \"loss_\" + name)\n total_loss.backward()",
"def forward_backward_prop(data, labels, params, dimensions):\n\n ### Unpack network parameters (do not modify)\n ofs = 0\n Dx, H, Dy = (dimensions[0], dimensions[1], dimensions[2])\n\n activation = []\n\n W1 = np.reshape(params[ofs:ofs+ Dx * H], (Dx, H))\n ofs += Dx * H\n b1 = np.reshape(params[ofs:ofs + H], (1, H))\n ofs += H\n W2 = np.reshape(params[ofs:ofs + H * Dy], (H, Dy))\n ofs += H * Dy\n b2 = np.reshape(params[ofs:ofs + Dy], (1, Dy))\n\n ### Forward propagation\n activation.append(data)\n\n # Hidden layer inputs: (N, Dx) * (Dx, H) -> N x H\n z = np.dot(activation[-1], W1) + b1 \n # Activations, inputs to the final layer. \n activation.append(sigmoid(z)) # output of the hidden layer, activation\n # Final layer outputs: ( N x H ) * ( H, Dy) -> (N, Dy)\n z = np.dot(activation[-1], W2) + b2\n activation.append( softmax(z) )\n\n # Cross-entropy cost\n\n y_p = activation[-1]\n activation = activation[:-1] # remove activation data (output)\n\n cost = -np.sum(labels * np.log(y_p))\n \n error = []\n \n ### backward propagation\n sigma = (y_p - labels)\n error.append(sigma)\n\n gradb2 = np.sum(error[-1], axis=0)\n gradW2 = np.dot(activation[-1].T, error[-1])\n\n #\n sigma = np.dot(W2, error[-1].T)\n sigma = sigma.T * sigmoid_grad(activation[-1])\n activation = activation[:-1] # remove activation data ( hidden layer )\n\n error.append(sigma)\n\n gradb1 = np.sum(error[-1], axis=0)\n gradW1 = np.dot(activation[-1].T, error[-1])\n\n\n ### Stack gradients (do not modify)\n grad = np.concatenate((gradW1.flatten(), gradb1.flatten(), \n gradW2.flatten(), gradb2.flatten()))\n \n return cost, grad",
"def lossFunc(inputs, targets, hprev):\n xs, hs, ys, ps = {}, {}, {}, {} # input, hidden, output, out_prob states for each time t\n hs[-1] = np.copy(hprev)\n loss = 0\n \n # forward pass\n for t in xrange(len(inputs)):\n xs[t] = np.zeros((vocab_size,1)) \n xs[t][inputs[t]] = 1. # convert input to one-hot\n hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh)\n ys[t] = np.dot(Why, hs[t]) + by\n ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t]))\n loss += -np.log(ps[t][targets[t],0])\n \n # backward pass\n dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)\n dbh, dby = np.zeros_like(bh), np.zeros_like(by)\n dhnext = np.zeros_like(hs[0])\n for t in reversed(xrange(len(inputs))):\n # backprop into y\n dy = np.copy(ps[t])\n dy[targets[t]] -= 1\n # backprop into Why, hs, and by\n dWhy += np.dot(dy, hs[t].T)\n dby += dy\n dh = np.dot(Why.T, dy) + dhnext\n # backprop through tanh activition\n dhraw = (1 - hs[t] * hs[t]) * dh\n # backprop into Wxh, Whh, hs, and bh\n dbh += dhraw\n dWxh += np.dot(dhraw, xs[t].T)\n dWhh += np.dot(dhraw, hs[t-1].T)\n dhnext = np.dot(Whh.T, dhraw)\n # clip gradient preventing exploding\n for dparam in [dWxh, dWhh, dWhy, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam)\n\n return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]",
"def backward(cls, y, y_hat):\n raise Exception(\"Unimplemented\")",
"def backward_pass(architecture,gradient_layerwise,grad_weights,grad_bias):\n \n for layer in range(len(architecture)-1,-1,-1):\n X_input,X_output,weightsi,biasi,X_input_im2col,imi,output_shapei,kernel_shapei,stridei,operationi,imxi = architecture['layer{}'.format(layer+1)]\n# print(\"Operation is:{} and Layer is: {}\".format(operationi,layer+1))\n if operationi == 'softmax': # Last layer -> Dont apply softmax in any layer other than the last layer!\n # not taking gradients here because we need dz_dX(secondlastlayer) which is y_pred - y\n continue\n \n if operationi == 'conv_bn_relu' or operationi == 'conv_relu' or operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if operationi__1 == 'softmax':\n y_pred = architecture['layer{}'.format(layer+2)][1]\n y_pred = torch.reshape(y_pred,y.shape)\n dz_dXi = y_pred - y\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = dz_dXi # .\n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n gradient_layerwise['layer{}'.format(layer+1)][2] = dz_dbi # .\n try:\n dz_dweightsi = (dz_dXi).mm(torch.t(X_input_im2col)) # dz_dweightsi = dz_dXi * dXi_dweightsi (chain rule)\n except:\n dz_dweightsi = (dz_dXi).mm(X_input_im2col)\n \n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n gradient_layerwise['layer{}'.format(layer+1)][1] = dz_dweightsi #\n elif operationi__1 == 'maxpool': # need to do something here to fix the problem\n None\n\n elif 'flatten' in operationi__1:\n # we currently have dz_doutput of flatten -> we want dz_doutput of the conv_bn_relu before flatten\n \n weightsi__1 = architecture['layer{}'.format(layer+2)][2] # weights2\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dXoutput of flatten\n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5] # i\n try:\n dz_dXi = torch.t(weightsi__1).mm(dz_dXi__1)\n except:\n dz_dXi = weightsi__1.mm(dz_dXi__1)\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n\n dz_dXi = torch.reshape(dz_dXi,(output_shapei[1]*output_shapei[2],-1))\n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n dz_dweightsi = X_input_im2col.mm(dz_dXi)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0 # Gradient Clipping\n dz_dbi = dz_dXi\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)# Can also set this to layer like in line ~800\n \n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi) # Can also set this to layer like in line ~800\n \n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi) # Can also set this to layer like in line ~800\n \n else:\n weightsi__1 = architecture['layer{}'.format(layer+2)][2]\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # 
dz_dX2 -> backpropagated from maxpool\n output_shapei__1 = architecture['layer{}'.format(layer+2)][6]\n operationi__1 == architecture['layer{}'.format(layer+2)][9] # ...\n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5]\n try:\n Y = weightsi__1.mm(dz_dXi__1)\n except:\n Y = weightsi__1.mm(torch.t(dz_dXi__1))\n dz_dXi = torch.zeros(X_output.shape)\n output_shape_current_layer = architecture['layer{}'.format(layer+1)][6]\n bias_current_layer = architecture['layer{}'.format(layer+1)][3]\n X_im2col_current_layer = architecture['layer{}'.format(layer+1)][4]\n for i in range(np.shape(X_output)[0]):\n for j in range(np.shape(X_output)[1]):\n for k in range(np.shape(X_output)[2]):\n idxs = getIndexes(imi__1,(i,j,k))\n dz_dXi[i,j,k] = sum([Y[idx[0],idx[1]] for idx in idxs])\n \n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n if 'sigmoid' in operationi__1: # ...\n X_output = torch.reshape(X_output,dz_dXi.shape)\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi__1: # ...\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n dz_dXi = torch.reshape(dz_dXi,(output_shape_current_layer[1]*output_shape_current_layer[2],-1))\n dz_dbi = torch.reshape(dz_dXi,bias_current_layer.shape)\n dz_dweightsi = X_im2col_current_layer.mm(dz_dXi)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0 # Gradient Clipping\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)\n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi)\n \n if operationi == 'maxpool':\n \n weightsi__1 = architecture['layer{}'.format(layer+2)][2]\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dXoutput -> backpropagated from maxpool\n output_shapei__1 = architecture['layer{}'.format(layer+2)][6]\n operationi__1 == architecture['layer{}'.format(layer+2)][9] # ...\n \n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5]\n try:\n Y = weightsi__1.mm(dz_dXi__1)\n except:\n try:\n Y = weightsi__1.mm(torch.t(dz_dXi__1))\n except:\n Y = torch.t(weightsi__1).mm(dz_dXi__1) # Ensuring valid matrix multiplication here\n \n dz_dXi = torch.zeros(X_output.shape)\n output_shape_current_layer = architecture['layer{}'.format(layer+1)][6]\n bias_current_layer = architecture['layer{}'.format(layer+1)][3]\n X_im2col_current_layer = architecture['layer{}'.format(layer+1)][4]\n for i in range(np.shape(X_output)[0]):\n for j in range(np.shape(X_output)[1]):\n for k in range(np.shape(X_output)[2]):\n idxs = getIndexes(imi__1,(i,j,k))\n dz_dXi[i,j,k] = sum([Y[idx[0],idx[1]] for idx in idxs])\n\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n \n if operationi__1 == 'conv_sigmoid' or operationi__1 == 'conv_bn_sigmoid': # ...\n X_output = torch.reshape(X_output,dz_dXi.shape)\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n else:\n dz_dXi[X_output <= 0] = 0\n\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)\n \n dz_dXinput = torch.zeros((X_input.shape))\n dz_dXoutput = gradient_layerwise['layer{}'.format(layer+1)][0] # output = output of maxpool\n\n 
dz_dXoutput = torch.reshape(dz_dXoutput,(output_shapei[0],X_input_im2col.shape[2]))\n \n for i in range(output_shapei[0]):\n for j in range(X_input_im2col.shape[2]):\n Xi2ci = X_im2col_current_layer[i,:,:]\n idx = torch.argmax(Xi2ci[:,j]).item()\n value = imxi[i][(idx,j)]\n dz_dXinput[value[0],value[1],value[2]] += float(dz_dXoutput[i,j])\n\n# dz_dXinput = torch.reshape(dz_dXinput,output_shapei)\n \n X_prev_im2col = architecture['layer{}'.format(layer)][4]\n X_output_prev = architecture['layer{}'.format(layer)][1]\n X_output_prev = torch.reshape(X_output_prev,dz_dXinput.shape)\n X_input_prev = architecture['layer{}'.format(layer)][0]\n prev_bias = architecture['layer{}'.format(layer)][3]\n output_shape_prev = architecture['layer{}'.format(layer)][6]\n prev_operation = architecture['layer{}'.format(layer)][9]\n \n if prev_operation == 'conv_sigmoid' or prev_operation == 'conv_bn_sigmoid':\n dz_dXinput *= sigmoid(X_output_prev)*(1-sigmoid(X_output_prev)) # Taking the derivative of the sigmoid function\n else:\n dz_dXinput[X_output_prev <= 0] = 0\n \n if len(dz_dXinput.shape) == 3:\n dz_dXinput = torch.reshape(dz_dXinput,(-1,output_shape_prev[0]))\n \n dz_dbi = torch.reshape(dz_dXinput,prev_bias.shape)\n dz_dweightsi = X_prev_im2col.mm(dz_dXinput)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n \n gradient_layerwise['layer{}'.format(layer)][2] = torch.Tensor(dz_dbi)\n gradient_layerwise['layer{}'.format(layer)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer)][0] = torch.Tensor(dz_dXinput) # ...\n \n if 'flatten_dense' in operationi:\n \n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n \n if operationi__1 == 'softmax':\n \n X_input = torch.reshape(torch.Tensor(X_input),(-1,1))\n X_output = torch.reshape(X_output,(-1,1))\n y_pred = architecture['layer{}'.format(layer+2)][1]\n y_pred = torch.reshape(y_pred,y.shape)\n dz_dXi = y_pred - y\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if 'sigmoid' in operationi:\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n try:\n dz_dweightsi = (dz_dXi).mm(torch.t(X_input)) # dz_dweightsi = dz_dXi * dXi_dweightsi (chain rule)\n except:\n dz_dweightsi = (dz_dXi).mm(X_input)\n \n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = dz_dXi # Can also set this to layer like in line ~800\n gradient_layerwise['layer{}'.format(layer+1)][1] = dz_dweightsi # Can also set this to layer like in line ~800\n gradient_layerwise['layer{}'.format(layer+1)][2] = dz_dbi # Can also set this to layer like in line ~800\n \n else:\n # Have to modify and test this before implementation -> Specifically\n # the backprop implementation is not consistent with the ones above\n #\n X_output = torch.reshape(X_output,(-1,1))\n weights__i = architecture['layer{}'.format(layer+2)][2]\n dz_dXoutput = gradient_layerwise['layer{}'.format(layer+2)][0]\n dz_dXoutput = torch.reshape(torch.Tensor(dz_dXoutput),X_output.shape)\n X_input = torch.reshape(torch.Tensor(X_input),(-1,1))\n\n if 'relu' in operationi:\n dz_dXoutput[X_output<0] = 0\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = 
torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n if 'sigmoid' in operationi:\n dz_dXoutput*= sigmoid(X_output)*(1-sigmoid(X_output))\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n else:\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n \n unflattened_Xinput = architecture['layer{}'.format(layer+1)][0]\n dz_dXinput = torch.reshape(dz_dXinput,unflattened_Xinput.shape)\n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi)\n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXinput)\n \n if gradient_layerwise['layer{}'.format(layer+1)][1] is not None:\n try:\n grad_weights['layer{}'.format(layer+1)] += gradient_layerwise['layer{}'.format(layer+1)][1]\n except:\n grad_weights['layer{}'.format(layer+1)] += torch.t(gradient_layerwise['layer{}'.format(layer+1)][1])\n if gradient_layerwise['layer{}'.format(layer+1)][2] is not None:\n try:\n grad_bias['layer{}'.format(layer+1)] += gradient_layerwise['layer{}'.format(layer+1)][2]\n except:\n grad_bias['layer{}'.format(layer+1)] += torch.t(gradient_layerwise['layer{}'.format(layer+1)][2])\n \n gc.collect()\n return",
"def backward(self, x_out, x_target):\r\n return 2*(x_out - x_target)",
"def test_squared_loss_forward():\n from your_code import SquaredLoss\n X = np.array([[-1, 2, 1], [-3, 4, 1]])\n w = np.array([1, 2, 3])\n y = np.array([1, -1])\n\n loss = SquaredLoss(regularization=None)\n\n _true = 26.5\n _est = loss.forward(X, w, y)\n print(_est)",
"def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n # Set train/test mode for batchnorm params and dropout param since they\n # behave differently during training and testing.\n if self.dropout_param is not None:\n self.dropout_param['mode'] = mode \n if self.use_batchnorm:\n for bn_param in self.bn_params:\n bn_param[mode] = mode\n\n scores = None\n \n ### forward pass ###\n L = self.num_layers\n past_caches = [0 for i in range(L)]\n \n if self.use_dropout:\n dropout_caches = [0 for i in range(L)]\n \n out = X\n if self.use_batchnorm:\n for i in range(L-1):\n\n out, past_caches[i] = affine_batch_relu_forward(out, self.params['W' + str(i+1)],\n self.params['b' + str(i+1)], \n self.params['gamma' + str(i+1)],\n self.params['beta' + str(i+1)],\n self.bn_params[i])\n if self.use_dropout:\n out, dropout_caches[i] = dropout_forward(out, self.dropout_param)\n else:\n for i in range(L-1):\n\n out, past_caches[i] = affine_relu_forward(out, self.params['W' + str(i+1)],\n self.params['b' + str(i+1)])\n if self.use_dropout:\n out, dropout_caches[i] = dropout_forward(out, self.dropout_param)\n \n scores, past_caches[L-1] = affine_forward(out, self.params['W' + str(L)],\n self.params['b' + str(L)])\n \n ### backpropagation ###\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n loss, grads = 0.0, {}\n \n loss_l2 = 0\n \n loss, dx = softmax_loss(scores, y)\n for i in range(L-1): \n W = 'W' + str(i+1)\n loss_l2 += np.sum(self.params[W]*self.params[W])\n loss_l2 *= 0.5 * self.reg\n loss += loss_l2\n \n W_final = 'W'+str(L)\n b_final = 'b'+str(L)\n dx, grads[W_final], grads[b_final] = affine_backward(dx, past_caches[L-1])\n grads[W_final] += self.reg * self.params[W_final]\n \n if self.use_batchnorm:\n for i in range(L-1):\n ind = L-1-i\n W = 'W'+str(ind)\n b = 'b'+str(ind)\n gamma = 'gamma' + str(ind)\n beta = 'beta' + str(ind)\n \n if self.use_dropout:\n dx = dropout_backward(dx, dropout_caches[-i-2])\n\n dx, grads[W], grads[b], grads[gamma], grads[beta] = affine_batch_relu_backward(dx, past_caches[-i-2])\n grads[W] += self.reg * self.params[W]\n\n else:\n for i in range(L-1):\n ind = L-1-i\n W = 'W'+str(ind)\n b = 'b'+str(ind)\n \n if self.use_dropout:\n dx = dropout_backward(dx, dropout_caches[-i-2])\n\n dx, grads[W], grads[b] = affine_relu_backward(dx, past_caches[-i-2])\n grads[W] += self.reg * self.params[W]\n\n return loss, grads",
"def backward_pass(self):\r\n # the gradient of cross-entropy on top of softmax is (t-y)\r\n back_output = (self.targets - self.y) / self.y.shape[0]\r\n\r\n for layer in reversed(self.layers):\r\n back_output = layer.backward_pass(back_output)",
"def backward(self, gradient):\n #TODO\n pass",
"def backward(self, gradient):\n #TODO\n pass",
"def _backprop(x, target, w_out, b_out, w_h=None, b_h=None):\n\n # If there is no hidden layer, <sigma_h> is just the input layer (see\n # <forward>). In this case, we return immediately after computing gradients\n # for output layer, since there are no more layers to consider.\n sigma_h, sigma_out = forward(x, w_out, b_out, w_h, b_h)\n\n # Backprop sigma_out = sigmoid(out) = sigmoid(w_out @ sigma_h + b_out).\n # We denote the loss function by L.\n\n # ∂L/∂b_out = ∂L/∂sigma_out * ∂sigma_out/∂out * ∂out/∂b_out\n # Notice ∂out/∂b_out is the one vector since coefficient of b_out is 1.\n # Therefore, ∂L/∂b_out = ∂L/∂sigma_out * ∂sigma_out/∂out = ∂L/∂out.\n db_out = (target - sigma_out) * sigma_out * (1 - sigma_out) # K x 1\n\n # ∂L/∂w_out = ∂L/∂out * ∂out/∂w_out\n dw_out = db_out @ sigma_h.reshape(1, -1) # K x H\n\n if (w_h is None):\n return (dw_out, db_out, None, None)\n\n # Backprop sigma_h = sigmoid(h) = sigmoid(w_h @ x + b_h).\n # This is only necessary if there is a hidden layer.\n\n # ∂L/∂sigma_h = ∂L/∂out * ∂out/∂sigma_h\n # Note: Mitchell refers to this partial as ∂net_k.\n dsigma_h = np.transpose(w_out) @ db_out # H x 1\n\n # ∂L/∂h = ∂L/∂sigma_h * ∂sigma_h/∂h\n # Similar to ∂L/∂b_out, ∂h/∂b_h is the one vector, so ∂L/∂b_h = ∂L/∂h.\n db_h = dsigma_h * sigma_h * (1 - sigma_h) # H x 1\n\n # ∂L/∂w_h = ∂L/∂h * ∂h/∂w_h\n dw_h = db_h @ x.reshape(1, -1) # H x N\n\n return (dw_out, db_out, dw_h, db_h)",
"def forward_loss(self, img_emb, cap_emb, **kwargs):\n loss = self.criterion(img_emb, cap_emb)\n self.logger.update('Le', loss.data, img_emb.size(0))\n return loss"
] | [
"0.70681214",
"0.6942057",
"0.659498",
"0.6545506",
"0.6523432",
"0.64993215",
"0.64905953",
"0.64883435",
"0.64445055",
"0.6438305",
"0.64352924",
"0.642554",
"0.6404636",
"0.6404417",
"0.63782287",
"0.63528156",
"0.6351562",
"0.6339242",
"0.6338139",
"0.6330921",
"0.6330372",
"0.63294256",
"0.6314674",
"0.63052446",
"0.62817055",
"0.6274549",
"0.6258345",
"0.6258345",
"0.6256198",
"0.6244522"
] | 0.8668524 | 0 |
Tests the forward pass of the squared loss function | def test_squared_loss_forward():
from your_code import SquaredLoss
X = np.array([[-1, 2, 1], [-3, 4, 1]])
w = np.array([1, 2, 3])
y = np.array([1, -1])
loss = SquaredLoss(regularization=None)
_true = 26.5
_est = loss.forward(X, w, y)
    print(_est)
    assert np.isclose(_est, _true)
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def forward(self,y_out, y_truth): \n result = (np.square(np.subtract(y_out, y_truth)))\n #########################################################################\n # TODO: #\n # Implement the forward pass and return the output of the MSE loss. #\n #########################################################################\n\n #########################################################################\n # END OF YOUR CODE #\n #########################################################################\n \n return result",
"def test_fwd_propagation():\n\n X = np.random.randn(3, 5)\n print(\"test\", \"X\", X)\n print(\"test\", \"X.shape\", X.shape)\n W = np.random.randn(2, 3)\n print(\"test\", \"W\", W)\n print(\"test\", \"W.shape\", W.shape)\n B = np.random.randn(2, 1)\n print(\"test\", \"B\", B)\n print(\"test\", \"B.shape\", B.shape)\n Z, yhat = forward_propagation(X, W, B)\n print(\"test\", \"Z\", Z)\n print(\"test\", \"yhat\", yhat)\n\n ycorrect = np.arange(10).reshape(2, 5)\n print(\"test\", \"ycorrect\", ycorrect)\n test_loss = loss(yhat, ycorrect)\n print(\"test\", \"test_loss\", test_loss)\n test_cost = cost(test_loss)\n print(\"test\", \"test_cost\", test_cost)\n\n # dA = A - Y\n dA = yhat - ycorrect\n\n dX, dW, dB = both_relu_and_linear_transform_and_chain_rule(dA, Z, X, W)\n print(\"test\", \"dW\", dW)\n print(\"test\", \"dB\", dB)",
"def test_step(self, batch, batch_idx):\n x, y = batch\n pred = self.forward(x)\n loss = self.m_loss_function(pred, y)\n return loss",
"def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n # Set train/test mode for batchnorm params and dropout param since they\n # behave differently during training and testing.\n if self.dropout_param is not None:\n self.dropout_param['mode'] = mode \n if self.use_batchnorm:\n for bn_param in self.bn_params:\n bn_param[mode] = mode\n\n scores = None\n \n ### forward pass ###\n L = self.num_layers\n past_caches = [0 for i in range(L)]\n \n if self.use_dropout:\n dropout_caches = [0 for i in range(L)]\n \n out = X\n if self.use_batchnorm:\n for i in range(L-1):\n\n out, past_caches[i] = affine_batch_relu_forward(out, self.params['W' + str(i+1)],\n self.params['b' + str(i+1)], \n self.params['gamma' + str(i+1)],\n self.params['beta' + str(i+1)],\n self.bn_params[i])\n if self.use_dropout:\n out, dropout_caches[i] = dropout_forward(out, self.dropout_param)\n else:\n for i in range(L-1):\n\n out, past_caches[i] = affine_relu_forward(out, self.params['W' + str(i+1)],\n self.params['b' + str(i+1)])\n if self.use_dropout:\n out, dropout_caches[i] = dropout_forward(out, self.dropout_param)\n \n scores, past_caches[L-1] = affine_forward(out, self.params['W' + str(L)],\n self.params['b' + str(L)])\n \n ### backpropagation ###\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n loss, grads = 0.0, {}\n \n loss_l2 = 0\n \n loss, dx = softmax_loss(scores, y)\n for i in range(L-1): \n W = 'W' + str(i+1)\n loss_l2 += np.sum(self.params[W]*self.params[W])\n loss_l2 *= 0.5 * self.reg\n loss += loss_l2\n \n W_final = 'W'+str(L)\n b_final = 'b'+str(L)\n dx, grads[W_final], grads[b_final] = affine_backward(dx, past_caches[L-1])\n grads[W_final] += self.reg * self.params[W_final]\n \n if self.use_batchnorm:\n for i in range(L-1):\n ind = L-1-i\n W = 'W'+str(ind)\n b = 'b'+str(ind)\n gamma = 'gamma' + str(ind)\n beta = 'beta' + str(ind)\n \n if self.use_dropout:\n dx = dropout_backward(dx, dropout_caches[-i-2])\n\n dx, grads[W], grads[b], grads[gamma], grads[beta] = affine_batch_relu_backward(dx, past_caches[-i-2])\n grads[W] += self.reg * self.params[W]\n\n else:\n for i in range(L-1):\n ind = L-1-i\n W = 'W'+str(ind)\n b = 'b'+str(ind)\n \n if self.use_dropout:\n dx = dropout_backward(dx, dropout_caches[-i-2])\n\n dx, grads[W], grads[b] = affine_relu_backward(dx, past_caches[-i-2])\n grads[W] += self.reg * self.params[W]\n\n return loss, grads",
"def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n scores = None\n ############################################################################\n # Implementing the forward pass for the fully-connected net, computing #\n # the class scores for X and storing them in the scores variable. #\n ############################################################################\n\n l_input = X.copy()\n out = []\n cache = []\n for i in range(self.num_layers - 1):\n # layerwise compute the forward pass and store outputs in out list\n key = ['W' + str(i+1), 'b' + str(i+1)]\n lout, lcache = affine_sigmoid_forward(l_input, self.params[key[0]], self.params[key[1]])\n out.append(lout)\n cache.append(lcache)\n l_input = lout\n\n key = ['W' + str(self.num_layers), 'b' + str(self.num_layers)]\n scores, lcache = affine_forward(out[self.num_layers - 2], self.params[key[0]], self.params[key[1]])\n cache.append(lcache)\n \n # regularization parameter compute by summing square of all weight vectors\n R = 0\n for i in range(1, self.num_layers + 1):\n key = 'W' + str(i)\n R += np.sum(np.power(self.params[key], 2))\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n loss, grads = 0.0, {}\n\n ########################\n # Backward pass to compute the loss and gradients\n ########################\n\n loss, dscore = softmax_loss(scores, y)\n # Apply regularization of the loss \n loss = loss + 0.5 * self.reg * R\n\n key = ['W' + str(self.num_layers), 'b' + str(self.num_layers)]\n dx, grads[key[0]], grads[key[1]] = affine_backward(dscore, cache[self.num_layers - 1])\n grads[key[0]] += self.reg * self.params[key[0]] \n\n for i in range(self.num_layers - 1, 0, -1):\n key = ['W' + str(i), 'b' + str(i)]\n dx, grads[key[0]], grads[key[1]] = affine_sigmoid_backward(dx, cache[i-1])\n # Apply regularization to the gradients\n grads[key[0]] += self.reg * self.params[key[0]]\n\n return loss, grads",
"def test_loss_with_reg(self):\n self.model.w = np.array([[0.1, 0.2]])\n self.model.b = 0.2\n self.model.l2_reg = 0.1\n x = np.array([[0.3, 0.4],\n [0.5, 0.6]])\n y = np.array([1, -1])\n out = self.model.loss(x, y)\n should_be = 0.7226 + 0.1 * np.sum(self.model.w ** 2)\n\n # test numerically\n self.assertTrue(np.abs(out - should_be) < 0.01)",
"def loss(self, X, y=None):\n\n # In dev testing, the loss fnc stops at \"scores\" , unfollowed by \"softmax\" probability prediction.\n # In real testing, \"self.predict()\" needs to be implemented in Solver() class.\n \n if y is None:\n for bn_param in self.bn_params:\n bn_param[\"mode\"] = \"test\"\n\n\n W1, b1 = self.params['W1'], self.params['b1']\n gamma1, beta1 = self.params[\"sbnGamma1\"], self.params[\"sbnBeta1\"]\n bn_param1 = self.bn_params[0]\n\n W2, b2 = self.params['W2'], self.params['b2']\n gamma2, beta2 = self.params[\"sbnGamma2\"], self.params[\"sbnBeta2\"]\n bn_param2 = self.bn_params[1]\n\n W3, b3 = self.params['W3'], self.params['b3']\n gamma3, beta3 = self.params[\"bnGamma3\"], self.params[\"bnBeta3\"]\n bn_param3 = self.bn_params[2]\n\n W4, b4 = self.params['W4'], self.params['b4']\n \n # pass conv_param to the forward pass for the convolutional layer\n conv_param = self.conv_param\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = self.maxpool_params\n\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n \n scores = None \n cache = {}\n # def conv_sbn_relu_forward(x, w, b, gamma, beta, conv_param, bn_param): return out, cache;\n out, cache[\"layer1\"] = layer_utils.conv_sbn_relu_forward(X, W1, b1, gamma1, beta1, conv_param, bn_param1) \n out, cache[\"layer2\"] = layer_utils.conv_sbn_relu_forward(out, W2, b2, gamma2, beta2, conv_param, bn_param2)\n\n # def max_pool_forward_fast(x, pool_param): return out, cache;\n out, cache[\"maxpool\"] = fast_layers.max_pool_forward_fast(out, pool_param)\n\n # def affine_bn_relu_forward(x, w, b, gamma, beta, bn_param): return out, cache;\n \n out, cache[\"layer3\"] = layer_utils.affine_bn_relu_forward(out, W3, b3, gamma3, beta3, bn_param3)\n\n # def affine_forward(x, w, b): return out, cache;\n scores, cache[\"layer4\"] = layers.affine_forward(out, W4, b4)\n\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! 
#\n ############################################################################\n \n loss, grads = 0, {}\n\n # def softmax_loss(x, y): return loss, dscore;\n loss, dscores = layers.softmax_loss(scores, y)\n loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3) + np.sum(W4 * W4))\n\n # def affine_backward(dout, cache): return dx, dw, db;\n dout, dW4, db4 = layers.affine_backward(dscores, cache[\"layer4\"]) \n\n # def affine_bn_relu_backward(dout, cache): return dx, dw, db, dgamma, dbeta;\n dout, dW3, db3, dgamma3, dbeta3 = layer_utils.affine_bn_relu_backward(dout, cache[\"layer3\"])\n\n # print cache[\"layer3\"]\n\n # def max_pool_backward_fast(dout, cache): return max_pool_backward_im2col(dout, real_cache);\n # def max_pool_backward_im2col(dout, cache): return dx;\n dout = fast_layers.max_pool_backward_fast(dout, cache[\"maxpool\"])\n\n # def conv_sbn_relu_backward(dout, cache): return dx, dw, db, dgamma, dbeta;\n dout, dW2, db2, dgamma2, dbeta2 = layer_utils.conv_sbn_relu_backward(dout, cache[\"layer2\"])\n _, dW1, db1, dgamma1, dbeta1 = layer_utils.conv_sbn_relu_backward(dout, cache[\"layer1\"])\n\n # reg\n grads['W4'], grads['b4'] = dW4 + self.reg * W4, db4\n \n grads['W3'], grads['b3'] = dW3 + self.reg * W3, db3\n grads[\"bnGamma3\"], grads[\"bnBeta3\"] = dgamma3, dbeta3\n\n grads['W2'], grads['b2'] = dW2 + self.reg * W2, db2\n grads[\"sbnGamma2\"], grads[\"sbnBeta2\"] = dgamma2, dbeta2\n\n grads['W1'], grads['b1'] = dW1 + self.reg * W1, db1\n grads[\"sbnGamma1\"], grads[\"sbnBeta1\"] = dgamma1, dbeta1\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads",
"def _check_training(\n self, model, x_train, y_train, loss_name, num_epochs=2, learning_rate=0.001\n ):\n # create loss function\n loss = getattr(crypten.nn, loss_name)()\n\n for i in range(num_epochs):\n output = model(x_train)\n loss_value = loss(output, y_train)\n\n # set gradients to \"zero\"\n model.zero_grad()\n for param in model.parameters():\n self.assertIsNone(param.grad, \"zero_grad did not reset gradients\")\n\n # perform backward pass\n loss_value.backward()\n for param in model.parameters():\n if param.requires_grad:\n self.assertIsNotNone(\n param.grad, \"required parameter gradient not created\"\n )\n\n # update parameters\n orig_parameters, upd_parameters = {}, {}\n orig_parameters = self._compute_reference_parameters(\n \"\", orig_parameters, model, 0\n )\n model.update_parameters(learning_rate)\n upd_parameters = self._compute_reference_parameters(\n \"\", upd_parameters, model, learning_rate\n )\n\n # check parameter update\n parameter_changed = False\n for name, value in orig_parameters.items():\n if param.requires_grad and param.grad is not None:\n unchanged = torch.allclose(upd_parameters[name], value)\n if unchanged is False:\n parameter_changed = True\n self.assertTrue(\n parameter_changed, \"no parameter changed in training step\"\n )\n\n # record initial and current loss\n if i == 0:\n orig_loss = loss_value.get_plain_text()\n curr_loss = loss_value.get_plain_text()\n\n # check that the loss has decreased after training\n self.assertTrue(\n curr_loss.item() < orig_loss.item(),\n f\"{loss_name} has not decreased after training\",\n )",
"def get_loss(self, x, y):\n \"*** YOUR CODE HERE question 2 ***\"\n return nn.SquareLoss(self.run(x), y)",
"def _make_loss_test(self):\n with context.context(training=False):\n prediction = self(*self.inputs)\n thecost = self.cost(self.target, prediction)\n return theano.function(self.inputs + [self.target], thecost)",
"def test_relu_back(x, y):\n if x > 0:\n assert op.relu_back(x, y) == y\n else:\n assert op.relu_back(x, y) == 0.0",
"def loss(self, X, y=None):\r\n\r\n # Findout if it's trainig or test time\r\n mode = 'train'\r\n if y is None:\r\n mode = 'test'\r\n\r\n # Set the mode for batch normalization and dropout parameters if needed.\r\n if self.use_batch_norm:\r\n for bn_param in self.bn_params:\r\n bn_param['mode'] = mode\r\n if self.use_dropout:\r\n self.dropout_params['mode'] = mode\r\n\r\n # Compute the forward pass fo the cnn.\r\n caches = []\r\n input_layer = X\r\n for i in range(1, self.num_conv_layers+1):\r\n w = self.params['W{}'.format(i)]\r\n b = self.params['b{}'.format(i)]\r\n\r\n if self.use_batch_norm:\r\n gamma = self.params['gamma{}'.format(i)]\r\n beta = self.params['beta{}'.format(i)]\r\n layer_score, layer_cache = conv_bn_relu_pool_forward(input_layer, w, b, gamma, beta,\r\n self.conv_params, self.bn_params[i-1], \r\n self.pool_params)\r\n else:\r\n layer_score, layer_cache = conv_relu_pool_forward(input_layer, w, b, self.conv_params, \r\n self.pool_params)\r\n input_layer = layer_score\r\n caches.append(layer_cache)\r\n\r\n # Compute the forward pass for the fully connected net.\r\n num_layers = self.num_conv_layers + self.num_hidden_layers\r\n for i in range(self.num_conv_layers+1, num_layers+1):\r\n w = self.params['W{}'.format(i)]\r\n b = self.params['b{}'.format(i)]\r\n if self.use_batch_norm:\r\n gamma = self.params['gamma{}'.format(i)]\r\n beta = self.params['beta{}'.format(i)]\r\n layer_score, layer_cache = affine_bn_relu_forward(input_layer, w, b, gamma, beta,\r\n self.bn_params[i-1],\r\n dropout=self.use_dropout, \r\n dropout_param=self.dropout_params)\r\n else:\r\n layer_score, layer_cache = affine_relu_forward(input_layer, w, b, dropout=self.use_dropout, \r\n dropout_param=self.dropout_params)\r\n input_layer = layer_score\r\n caches.append(layer_cache)\r\n\r\n # Compute the forward pass for the output layer.\r\n w = self.params['W{}'.format(i+1)]\r\n b = self.params['b{}'.format(i+1)]\r\n scores, output_cache = affine_forward(input_layer, w, b)\r\n\r\n # If testing time return the scores\r\n if mode == 'test':\r\n return scores\r\n\r\n # Compute the loss\r\n loss, dscores = softmax_loss(scores, y)\r\n\r\n # Add regularization to the loss and the corresponding gradient.\r\n grads = {}\r\n for i in range(1, num_layers+2):\r\n w = 'W{}'.format(i)\r\n loss += 0.5 * self.reg * np.sum(self.params[w]**2)\r\n grads[w] = self.reg * self.params[w]\r\n\r\n # Compute the gradients using backprop on the fully connected net.\r\n # Start with the output layer\r\n w = 'W{}'.format(num_layers+1)\r\n b = 'b{}'.format(num_layers+1)\r\n dx, dw, db = affine_backward(dscores, output_cache)\r\n grads[w] += dw\r\n grads[b] = db\r\n for i in range(num_layers, self.num_conv_layers, -1):\r\n cache = caches[i-1]\r\n w = 'W{}'.format(i)\r\n b = 'b{}'.format(i)\r\n if self.use_batch_norm:\r\n gamma = 'gamma{}'.format(i)\r\n beta = 'beta{}'.format(i)\r\n dx, dw, db, dgamma, dbeta = affine_bn_relu_backward(dx, cache, self.use_dropout)\r\n grads[gamma] = dgamma\r\n grads[beta] = dbeta\r\n else:\r\n dx, dw, db = affine_relu_backward(dx, cache)\r\n grads[w] += dw\r\n grads[b] = db\r\n\r\n # Compute the gradeints using backprop on the convolutional layers.\r\n for i in range(self.num_conv_layers, 0, -1):\r\n cache = caches[i-1]\r\n w = 'W{}'.format(i)\r\n b = 'b{}'.format(i)\r\n if self.use_batch_norm:\r\n gamma = 'gamma{}'.format(i)\r\n beta = 'beta{}'.format(i)\r\n dx, dw, db, dgamma, dbeta = conv_bn_relu_pool_backward(dx, cache)\r\n grads[gamma] = dgamma\r\n grads[beta] = dbeta\r\n else:\r\n dx, dw, db = 
conv_relu_pool_backward(dx, cache)\r\n grads[w] += dw\r\n grads[b] = db\r\n\r\n return loss, grads",
"def solves_simple_problem(X, y, nn_instance):\n optimizer = optim.Adam(nn_instance.parameters(), lr=0.15)\n for ix in range(800):\n out = nn_instance.forward(X)\n loss = torch.sum((out.squeeze() - y) ** 2) / N\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n print(\"LOSS \", loss)\n return loss < 0.1",
"def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n # We are gonna store everythin in a dictionnary hidden\n hidden = {}\n hidden['h0'] = X.reshape(X.shape[0], np.prod(X.shape[1:]))\n\n for i in range(self.L):\n idx = i + 1\n # Naming of the variable\n w = self.params['W' + str(idx)]\n b = self.params['b' + str(idx)]\n h = hidden['h' + str(idx - 1)]\n\n # Computing of the forward pass.\n # Special case of the last layer (output)\n if idx == self.L:\n h, cache_h = affine_forward(h, w, b)\n hidden['h' + str(idx)] = h\n hidden['cache_h' + str(idx)] = cache_h\n\n # For all other layers\n else:\n h, cache_h = affine_relu_forward(h, w, b)\n hidden['h' + str(idx)] = h\n hidden['cache_h' + str(idx)] = cache_h\n\n scores = hidden['h' + str(self.L)]\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n # Computing of the loss\n data_loss, dscores = softmax_loss(scores, y)\n reg_loss = 0\n for w in [self.params[f] for f in self.params.keys() if f[0] == 'W']:\n reg_loss += 0.5 * self.reg * np.sum(w * w)\n\n loss = data_loss + reg_loss\n\n # Backward pass\n\n hidden['dh' + str(self.L)] = dscores\n for i in range(self.L)[::-1]:\n idx = i + 1\n dh = hidden['dh' + str(idx)]\n h_cache = hidden['cache_h' + str(idx)]\n if idx == self.L:\n dh, dw, db = affine_backward(dh, h_cache)\n hidden['dh' + str(idx - 1)] = dh\n hidden['dW' + str(idx)] = dw\n hidden['db' + str(idx)] = db\n else:\n dh, dw, db = affine_relu_backward(dh, h_cache)\n hidden['dh' + str(idx - 1)] = dh\n hidden['dW' + str(idx)] = dw\n hidden['db' + str(idx)] = db\n\n # w gradients where we add the regulariation term\n list_dw = {key[1:]: val + self.reg * self.params[key[1:]]\n for key, val in hidden.iteritems() if key[:2] == 'dW'}\n # Paramerters b\n list_db = {key[1:]: val for key, val in hidden.iteritems() if key[:2] == 'db'}\n # Parameters gamma\n list_dgamma = {key[1:]: val for key, val in hidden.iteritems() if key[:6] == 'dgamma'}\n # Paramters beta\n list_dbeta = {key[1:]: val for key, val in hidden.iteritems() if key[:5] == 'dbeta'}\n grads = {}\n grads.update(list_dw)\n grads.update(list_db)\n grads.update(list_dgamma)\n grads.update(list_dbeta)\n return loss, grads",
"def _test_forward_pass(\n self,\n batch_input_shape,\n axis,\n fp64_tol=1e-14,\n fp32_tol=1e-6,\n fp16_tol=1e-2,\n ):\n param_shape = [batch_input_shape[i] for i in axis]\n param_elems = 1\n for dim in param_shape:\n param_elems *= dim\n beta = np.arange(param_elems, dtype=\"float64\").reshape(param_shape)\n gamma = np.arange(1, param_elems + 1, dtype=\"float64\").reshape(\n param_shape\n )\n x = np.random.normal(size=batch_input_shape)\n\n for epsilon in 1e-12, 1e-3:\n expected = self._expected_layer_norm(\n x, beta, gamma, batch_input_shape, axis, epsilon\n )\n for dtype in \"float64\", \"float32\", \"float16\":\n norm = layer_normalization.LayerNormalization(\n axis=axis,\n dtype=dtype,\n batch_input_shape=batch_input_shape,\n epsilon=epsilon,\n beta_initializer=keras.initializers.constant(beta),\n gamma_initializer=keras.initializers.constant(gamma),\n )\n y = norm(keras.backend.cast(x, dtype))\n actual = keras.backend.eval(y)\n\n if dtype == \"float64\":\n tol = fp64_tol\n elif dtype == \"float32\":\n tol = fp32_tol\n else:\n assert dtype == \"float16\"\n tol = fp16_tol\n\n # We use absolute tolerances in addition to relative tolerances,\n # because some of the values are very close to zero.\n self.assertAllClose(expected, actual, rtol=tol, atol=tol)",
"def test_softplus_activation(self):\n self.assertEqual(\n [0.4740769841801067, 0.9740769841801067], list(af.SoftPlus().output(np.array([-0.5, 0.5]))))\n self.assertEqual([0.3775406687981454, 0.6224593312018546], list(\n af.SoftPlus().derivative(np.array([-0.5, 0.5]))))",
"def loss(self, X, y=None):\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the two-layer net, computing the #\n # class scores for X and storing them in the scores variable. #\n ############################################################################\n hid1, hid1cache = affine_relu_forward(X, self.params['W1'], self.params['b1'])\n scores, scorecache = affine_forward(hid1, self.params['W2'], self.params['b2'])\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n # If y is None then we are in test mode so just return scores\n if y is None:\n return scores\n\n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the two-layer net. Store the loss #\n # in the loss variable and gradients in the grads dictionary. Compute data #\n # loss using softmax, and make sure that grads[k] holds the gradients for #\n # self.params[k]. Don't forget to add L2 regularization! #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. #\n ############################################################################\n loss, dscores = softmax_loss(scores, y)\n loss += 0.5 * self.reg *( np.sum(self.params['W1']**2) + np.sum(self.params['W2']**2) )\n\n dhid1, grads['W2'], grads['b2'] = affine_backward(dscores, scorecache)\n dx, grads['W1'], grads['b1'] = affine_relu_backward(dhid1, hid1cache)\n\n grads['W1'] += self.reg * self.params['W1']\n grads['W2'] += self.reg * self.params['W2']\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads",
"def test_forward(self):\n # test single input\n self.model.w = np.array([[0.5, 0.25]])\n self.model.b = 0.5\n x = np.array([[0.2, 0.1]])\n out = self.model.forward(x)\n self.assertTrue(np.abs(out[0] - 0.6514) < 0.01)\n\n # test multiple inputs\n self.model.w = np.array([[0.1, 0.2]])\n self.model.b = 0.2\n x = np.array([[0.3, 0.4],\n [0.5, 0.6]])\n out = self.model.forward(x)\n should_be = np.array([0.5769,0.5915])\n self.assertTrue(np.allclose(out, should_be, atol=0.01))",
"def get_loss(self, x, y):\n \"*** YOUR CODE HERE ***\"\n predictedY = self.run(x)\n return nn.SquareLoss(predictedY, y)",
"def test(self):\n self.output = self.net.forward(Variable(self.source, volatile=True))\n self.loss = self.loss_function(self.output,\n Variable(self.target, volatile=True))",
"def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = \"test\" if y is None else \"train\"\n\n # Set train/test mode for batchnorm params and dropout param since they\n # behave differently during training and testing.\n if self.use_dropout:\n self.dropout_param[\"mode\"] = mode\n if self.normalization == \"batchnorm\":\n for bn_param in self.bn_params:\n bn_param[\"mode\"] = mode\n scores = None\n\n cache_affine = []\n cache_bn = []\n cache_ln = []\n cache_relu = []\n cache_dropout = []\n \n # Forward Pass\n out = X\n for i in range(self.num_layers - 1):\n # Affine\n W, b = self.params['W' + str(i+1)], self.params['b' + str(i+1)]\n out, cache = affine_forward(out, W, b)\n cache_affine.append(cache)\n # BN\n if self.normalization=='batchnorm':\n gamma, beta = self.params['gamma' + str(i+1)], self. params['beta' + str(i+1)]\n out, cache = batchnorm_forward(out, gamma, beta, self.bn_params[i])\n cache_bn.append(cache)\n if self.normalization=='layernorm':\n gamma, beta = self.params['gamma' + str(i+1)], self.params['beta' + str(i+1)]\n out, cache = layernorm_forward(out, gamma, beta, self.bn_params[i])\n cache_ln.append(cache)\n # ReLU\n out, cache = relu_forward(out)\n cache_relu.append(cache)\n # Dropout\n if self.use_dropout:\n out, cache = dropout_forward(out, self.dropout_param)\n cache_dropout.append(cache)\n # Input update\n x = out\n \n # Last Layer\n W, b = self.params['W' + str(self.num_layers)], self.params['b' + str(self.num_layers)]\n scores, cache = affine_forward(x, W, b)\n cache_affine.append(cache)\n\n # If test mode return early\n if mode == \"test\":\n return scores\n\n loss, grads = 0.0, {}\n\n N = X.shape[0]\n\n weight_name = 'W' + str(self.num_layers) \n bias_name = 'b' + str(self.num_layers)\n\n # Loss calculation\n loss, dx = softmax_loss(scores, y)\n # Last layer backwards\n dout, grads[weight_name], grads[bias_name] = affine_backward(dx, cache_affine.pop())\n # Last layer regularization\n loss += 0.5 * self.reg * np.sum(np.square(self.params[weight_name]))\n #grads[weight_name] /= N\n grads[weight_name] += self.reg * self.params[weight_name]\n # Layers: self.num_layer - 1 -> 1\n i = self.num_layers - 2\n while i >= 0:\n # Dropout\n if self.use_dropout:\n dout = dropout_backward(dout, cache_dropout.pop())\n # ReLU\n dout = relu_backward(dout, cache_relu.pop())\n # BN\n if self.normalization=='batchnorm':\n dout, grads['gamma' + str(i+1)], grads['beta' + str(i+1)] = batchnorm_backward(dout, cache_bn.pop())\n #LN\n if self.normalization=='layernorm':\n dout, grads['gamma' + str(i+1)], grads['beta' + str(i+1)] = layernorm_backward(dout, cache_ln.pop())\n # Affine\n weight_name = 'W' + str(i+1) \n bias_name = 'b' + str(i+1)\n dout, grads[weight_name], grads[bias_name] = affine_backward(dout, cache_affine.pop())\n # Regularization\n loss += 0.5 * self.reg * np.sum(np.square(self.params[weight_name]))\n #grads[weight_name] /= N\n grads[weight_name] += self.reg * self.params[weight_name]\n i -= 1\n\n return loss, grads",
"def get_loss(self, xs, y):\n \"*** YOUR CODE HERE ***\"\n predictedY = self.run(xs)\n return nn.SoftmaxLoss(predictedY, y)\n # return nn.SquareLoss(predictedY, y)",
"def test(self):\n with torch.no_grad():\n self.forward()",
"def get_loss(self, x, y):\n \"*** YOUR CODE HERE ***\"\n #make your predictions using run\n #compute loss nn.squareloss\n y_pred = self.run(x)\n return nn.SquareLoss(y_pred,y)",
"def loss(self, X, y=None):\n\t\tmode = 'test' if y is None else 'train'\n\t\tif self.dropout_param is not None:\n\t\t\tself.dropout_param['mode'] = mode\n\t\tif self.use_batchnorm:\n\t\t\tfor bn_param in self.bn_params:\n\t\t\t\tbn_param[mode] = mode\n\n\t\tW1, b1 = self.params['W1'], self.params['b1']\n\t\tW2, b2 = self.params['W2'], self.params['b2']\n\t\tW3, b3 = self.params['W3'], self.params['b3']\n\t\tW5, b5 = self.params['W5'], self.params['b5']\n\t\t\n\t\tgamma1, beta1 = self.params['gamma1'], self.params['beta1']\n\t\tgamma2, beta2 = self.params['gamma2'], self.params['beta2']\n\t\tgamma3, beta3 = self.params['gamma3'], self.params['beta3']\t\n\n\t\t# pass conv_param to the forward pass for the convolutional layer\n\t\tfilter_size1 = W1.shape[2]\n\t\tconv_param1 = {'stride': 1, 'pad': (filter_size1 - 1) / 2}\n\t\tfilter_size2 = W2.shape[2]\n\t\tconv_param2 = {'stride': 1, 'pad': (filter_size2 - 1) / 2}\n\t\t\n\t\t# pass pool_param to the forward pass for the max-pooling layer\n\t\tpool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\t\t\n\t\tscores = None\n\t\n\t\t# Convolutional layers\t\n\t\tz1, cache1 = conv_relu_forward(X, W1, b1, conv_param1)\n\t\tz2, cache2 = conv_relu_pool_forward(z1, W2, b2, conv_param2, pool_param)\n\t\tz3, cache3 = spatial_batchnorm_forward(z2, gamma1, beta1, self.bn_params[1])\n\n\t\t# Fully Connected layers\n\t\tz4, cache4 = affine_relu_bn_forward(z3, W3, b3, gamma2, beta2, self.bn_params[2])\n\t\tz4, cache9 = dropout_forward(z4, self.dropout_params)\n\n\t\t# Output layer\n\t\tz6, cache6 = affine_forward(z4, W5, b5)\n\t\tz7, cache7 = batchnorm_forward(z6, gamma3, beta3, self.bn_params[3])\n\t\t#z8, cache8 = dropout_forward(z7, self.dropout_params)\n\t\tscores = z7\n\t\t\n\t\tif y is None:\n\t\t\treturn scores\n\t\t\n\t\tloss, grads = 0, {}\n\t\tloss, dout = softmax_loss(scores, y)\n\t\tloss += self.reg * 0.5 * (np.power(self.params['W1'], 2).sum() +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W2'], 2).sum() +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W5'], 2).sum() +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W3'], 2).sum())\n\t\t\n\t\t#dx8 = dropout_backward(dout, cache8)\n\t\tdx7, grads['gamma3'], grads['beta3'] = batchnorm_backward(dout, cache7)\n\t\tdx6, grads['W5'], grads['b5'] = affine_backward(dx7, cache6)\n\t\tdx6 = dropout_backward(dx6, cache9)\n\t\tdx4, grads['W3'], grads['b3'], grads['gamma2'], grads['beta2'] = affine_relu_bn_backward(dx6, cache4)\n\t\t\n\t\tdx3, grads['gamma1'], grads['beta1'] = spatial_batchnorm_backward(dx4, cache3)\n\t\tdx2, grads['W2'], grads['b2'] = conv_relu_pool_backward(dx3, cache2)\n\t\tdx1, grads['W1'], grads['b1'] = conv_relu_backward(dx2, cache1)\n\t\t\n\t\treturn loss, grads",
"def loss(A, Y):\n return A - Y",
"def loss(self, X, y=None, justLoss=False):\n # N = X.shape[0]\n # mode = 'test' if y is None else 'train'\n scores = None\n\n W1, b1 = self.params['W1'], self.params['b1']\n # W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n\n conv_param = {'stride': 1, 'pad': 0}\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\n #######################################################################\n # TODO: Implement the forward pass for the convolutional neural net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n #######################################################################\n\n conv1, conv_cache = conv_forward(X, W1, b1, conv_param)\n relu1, relu_cache1 = relu_forward(conv1)\n\n # conv2, conv_cache2 = conv_forward(relu1, W2, b2, conv_param)\n # relu2, relu_cache2 = relu_forward(conv2)\n\n scores, maxpool_cache = max_pool_forward(relu1, pool_param)\n scores, forward_cache = fc_forward(scores, W3, b3)\n \n\n\n if y is None:\n return scores\n\n loss, grads = 0, {}\n #######################################################################\n # TODO: Implement the backward pass for the convolutional neural net, #\n # storing the loss and gradients in the loss and grads variables. #\n # Compute data loss using softmax, and make sure that grads[k] holds #\n # the gradients for self.params[k]. #\n loss, dscores = softmax_loss(scores, y)\n\n if justLoss:\n return loss\n # print(loss)\n\n\n dx_3, grads['W3'], grads['b3'] = fc_backward(dscores, forward_cache)\n dx_3 = max_pool_backward(dx_3, maxpool_cache)\n\n # dx_2 = relu_backward(dx_3, relu_cache2)\n # dx_2, grads['W2'], grads['b2'] = conv_backward(dx_3, conv_cache2)\n\n dx = relu_backward(dx_3, relu_cache1)\n dx, grads['W1'], grads['b1'] = conv_backward(dx, conv_cache)\n \n \n\n return loss, grads",
"def forward(self,y_out, y_truth): \n result = None\n #########################################################################\n # TODO: #\n # Implement the forward pass and return the output of the BCE loss. #\n #########################################################################\n\n result = -1 * (np.multiply(y_truth, np.log(y_out)) + np.multiply((1 - y_truth), np.log(1 - y_out)))\n \n \n #########################################################################\n # END OF YOUR CODE #\n #########################################################################\n \n return result",
"def fwd_pass( self, X, ls_emb, sp_emb, y, optimizer, loss_function, train=False, report=True):\n if train:\n self.zero_grad()\n outputs = self(X, ls_emb, sp_emb)\n loss = loss_function(outputs, y)\n\n if train:\n loss.backward()\n optimizer.step()\n\n if report:\n tp = 0\n tn = 0\n fn = 0\n fp = 0\n for i,j in zip(outputs, y):\n if torch.argmax(i) == torch.argmax(j):\n if j.data.cpu().numpy()[0] == 1: #positive instance\n tp += 1\n else: \n tn += 1\n else:\n if j.data.cpu().numpy()[0] == 1:\n fn += 1\n else:\n fp += 1\n\n #matches = [torch.argmax(i) == torch.argmax(j) for i,j in zip(outputs, y)]\n acc = (tp+tn)/(tp+tn+fp+fn)\n conf = [tp, fp, fn, tn]\n return acc, loss, conf\n else:\n return None,None",
"def testing_call(self, inputs, y, branch):\n self.eval()\n with torch.no_grad():\n out = self.forward(inputs, branch=branch)\n\n # Loss computation\n loss_obj = self.branch_losses[branch]\n loss = loss_obj(out, y)\n return out, loss.item()"
] | [
"0.6932652",
"0.68198997",
"0.65537393",
"0.65324885",
"0.6529143",
"0.63533276",
"0.6301397",
"0.6298944",
"0.628218",
"0.62585825",
"0.6255691",
"0.62509155",
"0.6244331",
"0.6233",
"0.6224718",
"0.62080723",
"0.6198414",
"0.6166633",
"0.61483914",
"0.6138617",
"0.6106742",
"0.6097672",
"0.60733485",
"0.6070457",
"0.6067006",
"0.606437",
"0.60563064",
"0.6049526",
"0.6046674",
"0.60460764"
] | 0.8524856 | 0 |
Tests the ability of the gradient descent algorithm to classify a linearly separable dataset. | def test_gradient_descent_blobs():
features, _, targets, _ = load_data('blobs')
hinge = make_predictions(features, targets, 'hinge', None)
# assert np.all(hinge == targets)
# l1_hinge = make_predictions(features, targets, 'hinge', 'l1')
# # assert np.all(l1_hinge == targets)
#
# l2_hinge = make_predictions(features, targets, 'hinge', 'l2')
# # assert np.all(l2_hinge == targets)
#
# squared = make_predictions(features, targets, 'squared', None)
# # assert np.all(squared == targets)
#
# l1_squared = make_predictions(features, targets, 'squared', 'l1')
#
# l2_squared = make_predictions(features, targets, 'squared', 'l2') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_train_dataset(self):\n classifiers, estimates =\\\n ada_boost.train_dataset(self.larger_matrix,\n self.larger_class_labels,\n 9)\n expected = [\n {'alpha': 0.6931471805599453,\n 'dim': 0,\n 'inequal': 'lt',\n 'threshold': 1.3},\n {'alpha': 0.9729550745276565,\n 'dim': 1,\n 'inequal': 'lt',\n 'threshold': 1.0},\n {'alpha': 0.8958797346140273,\n 'dim': 0,\n 'inequal': 'lt',\n 'threshold': 0.90000000000000002}\n ]\n self.assertEqual(classifiers, expected)",
"def test_multiclass_gradient_descent_blobs():\n from your_code import MultiClassGradientDescent\n\n np.random.seed(0)\n\n features, _, targets, _ = load_data('blobs')\n\n learner = MultiClassGradientDescent(loss='squared', regularization=None,\n learning_rate=0.01, reg_param=0.05)\n learner.fit(features, targets, batch_size=None, max_iter=1000)\n predictions = learner.predict(features)\n\n print(\"predictions: \", predictions)\n print(\"targets: \", targets)",
"def test(self,dataset):\n outputs = self.use(dataset)\n \n costs = np.ones((len(outputs),1))\n # Compute classification error\n for xy,pred,cost in zip(dataset,outputs,costs):\n x,y = xy\n if y == pred[0]:\n cost[0] = 0\n\n return outputs,costs",
"def test_gradient_convergence(self):\n pass",
"def train(features, targets, weights, bias):\n # see gradient_descent for explanation\n epochs = 100\n learning_rate = 0.1\n\n picture_nb = 2\n\n # Print current accuracy. How many people have been classified as sick/healthy correctly?\n predictions = predict(features, weights, bias)\n print(\"Accuracy: \", np.mean(predictions == targets))\n\n for epoch in range(epochs):\n if epoch % 10 == 0:\n # get normalized scores\n predictions = activation(pre_activation(features, weights, bias))\n # compare with targets to see how bad our algorithm is\n print(\"Cost = %s\" % cost(predictions, targets))\n # Replot graph. Check in create_dataset for explanation of parameters\n if picture_nb == 2:\n plt.plot(features[:, 0], (weights[0] * features[:, 0] + bias) / -weights[1], color='red')\n elif picture_nb == 11:\n plt.plot(features[:, 0], (weights[0] * features[:, 0] + bias) / -weights[1], color='green')\n else:\n plt.plot(features[:, 0], (weights[0] * features[:, 0] + bias) / -weights[1], color='orange')\n picture_nb+=1\n\n # Initialize gradients\n # weights_gradients is 2D array with 2 values\n weights_gradients = np.zeros(weights.shape)\n bias_gradient = 0\n # Go through each row\n for feature, target in zip(features, targets):\n # Compute prediction\n z = pre_activation(feature, weights, bias)\n # Get normalized score\n y = activation(z)\n # Update gradients based on formulas established before. Look at gradient_descent to understand what we\n # are doing. Also, the formulas are below, just before the call of the function train.\n weights_gradients += (y - target) * derivative_activation(z) * feature\n # no multiplication of feature because it does not depend on some coordinates.\n bias_gradient += (y - target) * derivative_activation(z)\n\n # Update variables. These are the lines that result the cost to get reduced.\n weights = weights - learning_rate * weights_gradients\n bias = bias - learning_rate * bias_gradient\n\n # Print final accuracy. How many people have been classified as sick/healthy correctly?\n predictions = predict(features, weights, bias)\n print(\"Accuracy: \", np.mean(predictions == targets))\n\n plt.scatter(features[:, 0], features[:, 1], s=40, c=targets, cmap=plt.cm.Spectral)\n plt.savefig(\"DataPointsLineEvolution.png\")\n # legend for understanding\n plt.legend(['Original division', 'New division', 'New division', 'New division', 'New division', 'New division',\n 'New division', 'New division', 'New division', 'Final division'], loc='upper left')\n # save picture of data points drawn.\n plt.savefig(\"DataPointsLineEvolutionLegend.png\")",
"def test_model(net, data_loader):\n net.eval()\n running_loss = 0.0\n with torch.no_grad():\n for data in data_loader:\n X = data['X']\n y_d = data['y_descreen']\n outputs = net(X)\n loss = criterion(outputs, y_d)\n running_loss += loss\n return running_loss",
"def train_logisticRegression(data: np.array, labels: np.array)->None:\n\n n_examples = np.size(data, 0)\n n_features = np.size(data, 1)\n n_categories = np.size(labels, 1)\n\n data = np.hstack((np.ones((n_examples, 1)), data))\n\n print(data[0:5, :])\n\n X_train, X_test, y_train, y_test, idx_test = split_data(data, labels, 0.7)\n\n convergence_goal = 1e-3\n learning_rate = 0.01\n\n theta = np.random.uniform(size=((n_features+1, n_categories)))\n\n for i in range(n_categories):\n\n cost_var = 1\n\n previous_cost = 1e6\n iterations = 0\n cost_to_plot = []\n\n while cost_var > convergence_goal:\n iterations += 1\n cost, grad = costFunction(X_train, y_train[:, i], theta[:, i])\n theta[:, i] = update_theta(theta[:, i], grad, learning_rate)\n cost_var = previous_cost - cost\n previous_cost = cost\n if iterations == 1: cost_var = 1\n cost_to_plot.append(cost)\n # print(cost)\n\n plt.plot(range(iterations), cost_to_plot, 'g-', label = 'cost')\n plt.xlabel('iterations')\n plt.ylabel('cost')\n # plt.show()\n\n predictions = lrPredict(theta, X_test)\n\n print(predictions[0:5, :])\n print(y_test[0:5, :])\n\n accuracy = np.mean([p == l for p, l in zip(predictions, y_test)])\n print(\"Accuracy = {}\".format(accuracy))\n\n pass",
"def test_class_logistic(\n y_train_dep,\n y_test_dep,\n y_train_preds_lr,\n y_test_preds_lr):\n # Ensure the function works\n try:\n cls.classification_logistic_results(\n y_train_dep, y_test_dep, y_train_preds_lr, y_test_preds_lr\n )\n logging.info(\n \"Successfully Plotting Classification Results using logistic regression\"\n )\n except Exception as err:\n logging.error(\"Errors in plotting logistic classification results\")\n raise err\n # Ensure the output exists\n try:\n assert os.path.isfile(\"images/results/assessment_logistic.png\")\n except AssertionError as err:\n logging.error(\"Errors in plotting logistic classification file\")\n raise err",
"def train(self, dataset):\n \"*** YOUR CODE HERE ***\"\n converged = False\n while not converged:\n failures = 0\n for item, classification in dataset.iterate_once(1):\n prediction = self.get_prediction(item)\n if prediction != nn.as_scalar(classification):\n failures += 1\n self.w.update(item, nn.as_scalar(classification))\n if failures == 0:\n converged = True",
"def run_check_grad(hyperparameters):\n # This creates small random data with 20 examples and\n # 10 dimensions and checks the gradient on that data.\n num_examples = 20\n num_dimensions = 10\n\n weights = np.random.randn(num_dimensions + 1, 1)\n data = np.random.randn(num_examples, num_dimensions)\n targets = np.random.rand(num_examples, 1)\n\n diff = check_grad(logistic,\n weights,\n 0.001,\n data,\n targets,\n hyperparameters)\n\n print(\"diff =\", diff)",
"def test_train_logist(x_train_variable, y_train_dep):\n # Ensure the function works\n try:\n lrc = cls.train_logistic(x_train_variable, y_train_dep)\n logging.info(\"Successful Logistic Model\")\n except Exception as err:\n logging.error(\"Errors in Fitting the Logistic Regression\")\n raise err\n return lrc",
"def testRegression(self):\n cont_features = [\n tf.contrib.layers.real_valued_column('feature', dimension=4)]\n\n regressor = tf.contrib.learn.DNNRegressor(feature_columns=cont_features,\n hidden_units=[3, 3])\n\n regressor.fit(input_fn=_iris_input_multiclass_fn, steps=1000)\n regressor.evaluate(input_fn=_iris_input_multiclass_fn, steps=100)",
"def test_machine_learning():",
"def test_classify(self):\n classifiers, estimates =\\\n ada_boost.train_dataset(self.larger_matrix,\n self.larger_class_labels,\n 9)\n data_to_classify = [1, 0.5]\n classifications = ada_boost.classify(data_to_classify, classifiers)\n expected = np.mat([-1.])\n self.assertEqual(classifications, expected)",
"def run_check_grad(hyperparameters):\n\n # This creates small random data with 7 examples and\n # 9 dimensions and checks the gradient on that data.\n num_examples = 7\n num_dimensions = 9\n\n weights = np.random.randn(num_dimensions+1, 1)\n data = np.random.randn(num_examples, num_dimensions)\n targets = (np.random.rand(num_examples, 1) > 0.5).astype(int)\n\n diff = check_grad(logistic, # function to check\n weights,\n 0.001, # perturbation\n data,\n targets,\n hyperparameters)\n\n print \"diff =\", diff",
"def train(self, dataset): \n dataset = dataset[dataset[:,-1].argsort()] # Sort the dataset by classes.\n #print dataset\n \n ########\n # Compute p(y=1) for all ys.\n ########\n label_counts = np.bincount(dataset[:,-1]) # Get the number of occurrences of each class, sorted. \n self.p_ys = label_counts * 1.0 / len(dataset) # Compute probs. \n \n ########\n # Compute p(x|y) for all x,y.\n ########\n self.feature_count = len(dataset[0]) - 1 \n self.class_count = len(label_counts)\n \n self.p_xi_given_ys = np.zeros((self.class_count, self.feature_count)) # Initialize matrix\n start_index = 0\n for i in range(self.class_count): # Loop over each class \n end_index = start_index + label_counts[i] # end of this class index \n denominator = label_counts[i] + 2.0 * self.alpha\n \n for j in range(self.feature_count): # Loop over each feature\n numerator = np.sum(dataset[start_index:end_index,j]) + self.alpha # Sum number of times word j = 1 in class i\n self.p_xi_given_ys[i][j] = numerator * 1.0 / denominator # Compute p(xi|y)\n \n start_index = end_index",
"def test(self):\r\n error_count = 0\r\n N_TESTING = len(self.TESTING_DATA)\r\n for i in range(N_TESTING):\r\n x_vec = self.TESTING_DATA[i][:-1]\r\n y = self.TESTING_DATA[i][-1]\r\n\r\n result = self.bp.classify(x_vec)\r\n if result != y: error_count += 1\r\n print(error_count, \" errors on the test data, out of \", N_TESTING, \"items.\")",
"def train(self, dataset): \n dataset = dataset[dataset[:,-1].argsort()] # Sort the dataset by classes.\n #print dataset\n \n ########\n # Compute p(y=1) for all ys.\n ########\n label_counts = np.bincount(dataset[:,-1]) # Get the number of occurrences of each class, sorted. \n self.p_ys = np.log(label_counts * 1.0 / len(dataset)) # Compute probs. \n \n ########\n # Compute p(x|y) for all x,y.\n ########\n self.feature_count = len(dataset[0]) - 1 \n self.class_count = len(label_counts)\n \n self.p_xi_given_ys = np.zeros((self.class_count, self.feature_count)) # Initialize matrix\n start_index = 0\n for i in range(self.class_count): # Loop over each class \n end_index = start_index + label_counts[i] # end of this class index \n class_word_counts = np.sum(dataset[start_index:end_index,:-1]) # sum all words of class i \n denominator = class_word_counts + self.alpha * self.feature_count # Here we add the feature_count as Laplace smoothing\n \n for j in range(self.feature_count): # Loop over each feature\n single_word_count = np.sum(dataset[start_index:end_index,j]) # sum number times word j appears in class i \n numerator = single_word_count + self.alpha\n self.p_xi_given_ys[i][j] = log(numerator * 1.0 / denominator) # Compute p(xi|y)\n \n start_index = end_index",
"def linear_classifier(position_array, class_array, n_classes):\n\n # linear classifier\n with tf.Graph().as_default():\n # YOUR CODE FOR PROBLEM 6A GOES HERE\n # Build neural network\n net = tflearn.input_data(shape=[None, 2])\n # 'None' always has to be the first parameter in shape because it tells\n # tensor flow that the number of data points we have can be variable\n # and 2 for 2 input nodes (x and y coordinates)\n\n net = tflearn.fully_connected(net, n_classes, activation='softmax') # layer with 4 nodes and softmax\n net = tflearn.regression(net, loss='categorical_crossentropy') #regression with categorical_crossentropy\n\n # Define model\n model = tflearn.DNN(net)\n new_class_array = np.zeros((len(class_array), 4))\n index = 0\n\n #change to be 4 dimensional\n for x in class_array:\n if x == 0:\n new_class_array[index] = [1,0,0,0]\n elif x == 1:\n new_class_array[index] = [0,1,0,0]\n elif x == 2:\n new_class_array[index]= [0,0,1,0]\n elif x == 3:\n new_class_array[index] = [0,0,0,1]\n index +=1 \n\n # Start training (apply gradient descent algorithm)\n model.fit(position_array, new_class_array, n_epoch=10, batch_size=10, show_metric=True, snapshot_step=1)\n return position_array, new_class_array, model",
"def non_linear_classifier(position_array, class_array, n_classes):\n with tf.Graph().as_default():\n # YOUR CODE FOR PROBLEM 6C GOES HERE\n # Build neural network\n net = tflearn.input_data(shape=[None, 2])\n # 'None' always has to be the first parameter in shape because it tells\n # tensor flow that the number of data points we have can be variable\n # and 2 for 2 input nodes (x and y coordinates)\n #sgd = tflearn.optimizers.SGD(learning_rate=2.0, lr_decay=0.5, decay_step=100)\n\n net = tflearn.fully_connected(net, 20000, activation='relu') # 20,0000 nodes\n net = tflearn.fully_connected(net, n_classes, activation='softmax') # layer with 4 nodes and softmax\n net = tflearn.regression(net, loss='categorical_crossentropy') #regression with categorical_crossentropy\n\n # Define model\n model = tflearn.DNN(net)\n new_class_array = np.zeros((len(class_array), 4))\n index = 0\n\n #change to be four dimensional\n for x in class_array:\n if x == 0:\n new_class_array[index] = [1,0,0,0]\n elif x == 1:\n new_class_array[index] = [0,1,0,0]\n elif x == 2:\n new_class_array[index]= [0,0,1,0]\n elif x == 3:\n new_class_array[index] = [0,0,0,1]\n index +=1 \n\n # Start training (apply gradient descent algorithm)\n model.fit(position_array, new_class_array, n_epoch=10, batch_size=10, show_metric=True, snapshot_step=1)\n return position_array, new_class_array, model",
"def neural_network(X, Y, Xs_test, Ys_test):\n ## YOUR CODE HERE\n #################\n return 0",
"async def train(gradient_boosting: bool = False) -> bool:\n data = clf.dataset()\n return clf.train(data['X'], data['y'], gradient_boosting)",
"def test_model(net, data_loader):\n net.eval()\n true_preds, count = 0.0, 0\n for imgs, labels in data_loader:\n imgs, labels = imgs.to(device), labels.to(device)\n with torch.no_grad():\n preds = net(imgs).argmax(dim=-1)\n true_preds += (preds == labels).sum().item()\n count += labels.shape[0]\n test_acc = true_preds / count\n return test_acc",
"def test(xtest, ytest, neural_net):\n loss, accuracy = neural_net.evaluate(xtest, ytest, verbose=0)\n return accuracy",
"def classify(data, labels, (train_idx, test_idx), classifier=None):\r\n\r\n assert classifier is not None, \"Why would you pass not classifier?\"\r\n\r\n # Data scaling based on training set\r\n scaler = SupervisedStdScaler() #SupervisedRobustScaler() # # \r\n scaler.fit(data[train_idx,:], labels[train_idx], label=-1)\r\n #scaler.fit(data[train_idx,:], labels[train_idx])\r\n data_train = scaler.transform(data[train_idx,:])\r\n data_test = scaler.transform(data[test_idx,:])\r\n try:\r\n classifier.fit(data_train, labels[train_idx])\r\n \r\n \r\n confMat = confusion_matrix(labels[test_idx],\r\n classifier.predict(data_test))\r\n if confMat.shape == (1,1):\r\n if all(labels[test_idx] == -1):\r\n confMat = np.array([[confMat[0], 0], [0, 0]], dtype=confMat.dtype)\r\n else:\r\n confMat = np.array([[0, 0], [0, confMat[0]]], dtype=confMat.dtype)\r\n confMatRate = confMat / np.tile(np.sum(confMat, axis=1).astype('float'), (2,1)).transpose()\r\n totalErr = (confMat[0, 1] + confMat[1, 0]) / float(confMat.sum())\r\n #if type(classifier) not in [type(None), DummyClassifier]:\r\n if hasattr(classifier,'param_grid'): \r\n #isinstance(classifier, GridSearchCV) or \\\r\n # isinstance(classifier, RandomizedSearchCV):\r\n fitted_model = classifier.best_estimator_\r\n else:\r\n fitted_model = copy.copy(classifier) \r\n return confMatRate, totalErr, fitted_model\r\n except np.linalg.linalg.LinAlgError as e:\r\n # sahil added statement to raise the error instead of returning nun values\r\n print e.message\r\n raise e\r\n # return np.array([[np.nan, np.nan], [np.nan, np.nan]]), np.nan, None\r",
"def run_one_epoch(self, dataloader, gradient_descent):\n debug = self.debug\n\n predictions_list = []\n scores_list = []\n ground_truth_list = []\n losses_list = []\n self.train(gradient_descent) # do not use dropout during train mode\n if gradient_descent:\n self.adapt_CCA(dataloader)\n\n\n for i_batch, (X, Y) in enumerate(dataloader):\n\n # if the model is ensemble of models, X is a dictionnary of tensors\n if isinstance(X, dict):\n for t in X.values(): t.requires_grad = gradient_descent\n else: # X is a tensor\n X.requires_grad = gradient_descent\n\n scores, predictions, dissimilar_features_dict = self.predict(X)\n\n CE_loss = self.criterion(scores, Y) # loss of the current batch\n decorrelation_loss = self.compute_decorrelation_loss(dissimilar_features_dict)\n\n\n n_couples =len(self.couple_signals)\n\n loss = CE_loss + (self.loss_coef/n_couples) * decorrelation_loss\n\n losses_list.append(loss.detach())\n scores_list.append(scores.detach())\n predictions_list.append(predictions)\n\n ground_truth_list.append(Y)\n\n if gradient_descent:\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n else:\n loss = loss.detach()\n\n if gradient_descent:\n print(i_batch)\n self.adapt_CCA(dataloader)\n\n\n\n\n if self.plot_conflict and gradient_descent: # plot on the train set only\n self.record_losses_conflict(X,Y)\n\n\n\n\n if len(dataloader)>0: # if the dataloader was nonempty:\n losses_list = [loss.item() for loss in losses_list]\n predictions = torch.cat([pred for pred in predictions_list], dim=0).to(torch.device('cpu')).detach().numpy()\n ground_truth = torch.cat([ gt for gt in ground_truth_list], dim=0).to(torch.device('cpu')).detach().numpy()\n scores = torch.cat([score for score in scores_list ], dim=0).to(torch.device('cpu')).detach().numpy()\n\n del ground_truth_list, scores_list, predictions_list\n # we fetch all data from the GPU at once, to minimize the time the GPU and CPU spend waiting each other.\n\n average_loss = np.mean(losses_list) # average over the batches\n\n f1_value = f1_score(ground_truth, predictions, average='macro')\n\n # mode = 'train' if gradient_descent else 'val'\n # print(\"{}: loss = {:.3f}, F1_score = {:.3f}%\".format(mode, average_loss, 100*f1_value))\n\n\n else: # the dataloader was empty\n average_loss, f1_value = -1, -1\n predictions, scores, ground_truth = np.array([]), np.array([]), np.array([])\n\n if debug:\n print('cuda.memory_allocated = %.2f Go' % (torch.cuda.memory_allocated()/10**9))\n\n return average_loss, f1_value, predictions, scores, ground_truth",
"def test(self, dataset):\n model_path = os.path.join(self.check_point, 'model.pt')\n if not os.path.exists(model_path):\n raise Exception('Cannot find %s.' % model_path)\n\n self.model = torch.load(model_path)\n print(self.model)\n model_parameters = filter(lambda p: p.requires_grad, self.model.parameters())\n params = sum([np.prod(p.size()) for p in model_parameters])\n print(1.0 * params / (1000 * 1000))\n _, _, stats, outputs, names = self._check_PSNR(dataset, is_test=True)\n return stats, outputs, names",
"def disc_step(real_data,fake_data):\n with tf.GradientTape() as tape:\n loss = discriminator_loss(real_data,fake_data)\n loss = tf.add_n([loss] + discriminator.losses)\n gradients = tape.gradient(loss, discriminator.trainable_variables)\n d_optimizer.apply_gradients(zip(gradients, discriminator.trainable_variables))\n return loss",
"def classify(dataset,classifier,feat_mask=None):\r\n \r\n train = dataset.get_data('train',True)\r\n X_train = train['x']\r\n if feat_mask is not None:\r\n X_train = X_train[:,feat_mask]\r\n y_train = train['y']\r\n \r\n classifier.fit(X_train,y_train)\r\n \r\n test = dataset.get_data('test',True)\r\n X_test = test['x']\r\n if feat_mask is not None:\r\n X_test = X_test[:,feat_mask]\r\n y_test = test['y']\r\n \r\n pred = classifier.predict(X_test)\r\n \r\n acc = np.count_nonzero(pred==y_test) / len(y_test)\r\n return acc,y_test,pred",
"def test_simple(self):\n [X, labels, Y] = self.gen_data()\n\n # Call algorithm\n bias = multiLogReg(self.sds.from_numpy(\n X), self.sds.from_numpy(Y), verbose=False).compute()\n\n # Calculate result.\n res = np.reshape(np.dot(X, bias[:len(X[0])]) + bias[len(X[0])], (250))\n def f2(x): return (x < 0) + 1\n accuracy = np.sum(labels == f2(res)) / 250 * 100\n\n self.assertTrue(accuracy > 98)"
] | [
"0.6490156",
"0.6451971",
"0.64335155",
"0.6392328",
"0.63797176",
"0.62720233",
"0.6257553",
"0.6255466",
"0.6249252",
"0.623954",
"0.6229559",
"0.62094355",
"0.6200106",
"0.61554265",
"0.61163557",
"0.6115135",
"0.61097056",
"0.6069717",
"0.60438246",
"0.60386896",
"0.60309684",
"0.6021184",
"0.6003281",
"0.59838766",
"0.59645253",
"0.59639996",
"0.59093034",
"0.5905873",
"0.5898636",
"0.5883033"
] | 0.65788424 | 0 |
Returns a client configured with the given MetaHttpClient | def __init__(self, metaHttpClient):
self.httpClient = metaHttpClient | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_httpx_client() -> httpx.Client:\n return httpx.Client(**CLIENT_PARAMETERS) # type: ignore",
"def _obtain_http_client(hostname=METADATA_SERVER_HOSTNAME):\n return http.client.HTTPConnection(hostname,\n timeout=METADATA_SERVER_CONN_TIMEOUT)",
"def get(cls, configuration: HttpClientConfiguration) -> HttpClient:\n client_type = configuration.client_type\n\n if client_type == HttpClientType.UAA:\n return cls._get_instance(configuration, ClientAuthType.TOKEN_UAA)\n\n elif client_type == HttpClientType.CONSOLE:\n return cls._get_instance(configuration, ClientAuthType.LOGIN_PAGE)\n\n elif client_type == HttpClientType.CONSOLE_NO_AUTH:\n return cls._get_instance(configuration, ClientAuthType.NO_AUTH)\n\n elif client_type == HttpClientType.APPLICATION:\n return cls._get_instance(configuration, ClientAuthType.TOKEN_CF)\n\n elif client_type == HttpClientType.CLOUD_FOUNDRY:\n return cls._get_instance(configuration, ClientAuthType.TOKEN_CF)\n\n elif client_type == HttpClientType.BROKER:\n return cls._get_instance(configuration, ClientAuthType.HTTP_BASIC)\n\n elif client_type == HttpClientType.WEBHDFS:\n return cls._get_instance(configuration, ClientAuthType.WEBHDFS)\n \n elif client_type == HttpClientType.SERVICE_TOOL:\n return cls._get_instance(configuration, ClientAuthType.NO_AUTH)\n\n elif client_type == HttpClientType.CLOUDERA:\n return cls._get_instance(configuration, ClientAuthType.HTTP_BASIC)\n\n else:\n raise HttpClientFactoryInvalidClientTypeException(client_type)",
"def createClientFromUrl(url, authStrategy=None):\n return MetaClient(MetaHttpClient(url, authStrategy))",
"def create_http_client(http_client_name=None, metrics=None, cache=None, config=None):\n client_config = config and config.requests.get_section(http_client_name or 'default')\n cache_ttl = None\n max_retries = 0\n client_cache = None\n api_name = http_client_name\n\n if client_config:\n if cache and client_config.cache.get('enabled'):\n client_cache = cache.http\n cache_ttl = client_config.cache.get('ttl')\n max_retries = client_config.get('max_retries')\n api_name = client_config.get('api_name', api_name)\n\n return HttpClient(api_name=api_name,\n metrics=metrics,\n cache=client_cache,\n cache_ttl=cache_ttl,\n max_retries=max_retries)",
"def _client(self) -> httpx.Client:\n return httpx.Client(\n base_url=self._base_url,\n headers=self._authorization_headers,\n proxies=self._proxies,\n )",
"def client(metadata_response) -> airbase.AirbaseClient:\n return airbase.AirbaseClient()",
"def get_client(version, **kwargs):\n endpoint = kwargs.get('os_endpoint') or kwargs.get('ceilometer_url')\n\n return Client(version, endpoint, **kwargs)",
"def get_aynsc_httpx_client() -> httpx.AsyncClient:\n return httpx.AsyncClient(**CLIENT_PARAMETERS) # type: ignore",
"def get_client():\n return Client(__address, authkey='strumamor')",
"def client():\n return Client(**common_data.AUTH_ARGS)",
"def _client(self) -> httpx.AsyncClient:\n return httpx.AsyncClient(\n base_url=self._base_url,\n headers=self._authorization_headers,\n proxies=self._proxies,\n )",
"def get_http_client():\n store = file.Storage(TOKEN_STORE_FILE)\n creds = store.get()\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_ID_FILE, SCOPES)\n creds = tools.run_flow(flow, store)\n return creds.authorize(Http())",
"def get_client(\n service: str,\n version: str,\n http: Optional[Union[httplib2.Http, api_httplib.HttpMock]] = None,\n request_builder: Union[\n Type[api_httplib.HttpRequest],\n api_httplib.RequestMockBuilder] = api_httplib.HttpRequest\n) -> discovery.Resource:\n static_discovery = False if isinstance(http, api_httplib.HttpMock) else None\n return discovery.build(\n service,\n version,\n num_retries=_NUMBER_OF_RETRIES,\n http=http,\n requestBuilder=request_builder,\n static_discovery=static_discovery)",
"def get_auth_http_client(self):\n return httpclient.AsyncHTTPClient()",
"def get_http_client() -> httpx.AsyncClient:\n app = basic_app.API()\n return httpx.AsyncClient(\n app=app,\n base_url='http://localhost'\n )",
"def get_client(self):\n return self.client",
"def get_client(self, service):\n try:\n return boto3.client(service, region_name=self.region, config=self.proxy_config)\n except ClientError as e:\n fail(\"AWS %s service failed with exception: %s\" % (service, e))",
"def get_client():\n client_class = _import_by_path(settings.REDISIO_CLIENT_CLASS)\n return client_class(host=settings.REDISIO_HOST,\n port=settings.REDISIO_PORT,\n db=settings.REDISIO_DB)",
"def _http_client(self):\n\n self.__enforce_connected()\n return self.collection._http_client",
"def api_client() -> APIClient:\n return APIClient()",
"def api_client() -> APIClient:\n\n return APIClient()",
"def make_client(self, context):\n return Client(self.settings['client_routing'], context=context)",
"def get_mqtt_client(self, client_id: str) -> Client:\n return Client(client_id)",
"def get_client(host, port=None, username=None,\n password=None, tenant=None,\n auth_url=None, auth_strategy=None,\n auth_token=None, region=None,\n is_silent_upload=False, insecure=False):\n\n if auth_url:\n force_strategy = 'keystone'\n else:\n force_strategy = None\n\n creds = dict(username=username,\n password=password,\n tenant=tenant,\n auth_url=auth_url,\n strategy=force_strategy or auth_strategy,\n region=region,\n )\n\n if creds['strategy'] == 'keystone' and not creds['auth_url']:\n msg = (\"--auth_url option or OS_AUTH_URL environment variable \"\n \"required when keystone authentication strategy is enabled\\n\")\n raise exception.ClientConfigurationError(msg)\n\n use_ssl = (creds['auth_url'] is not None and\n creds['auth_url'].find('https') != -1)\n\n client = HeatClient\n\n return client(host=host,\n port=port,\n use_ssl=use_ssl,\n auth_tok=auth_token,\n creds=creds,\n insecure=insecure)",
"def get_client(\n client_id: str, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs\n):\n request = GetClient.create(\n client_id=client_id,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)",
"def _init_http_client(service_id=None, opts=None):\n if service_id:\n opts = _get_trs_opts(service_id)\n\n http_client = RequestsClient()\n\n http_client.set_api_key(host=opts['host'],\n api_key=opts['auth'],\n param_in='header')\n return http_client",
"def get_client(args):\n if args.auth_strategy == 'userpass':\n creds = {'username': args.username, 'password': args.password}\n else:\n creds = None\n\n try:\n client = Client(rucio_host=args.host, auth_host=args.auth_host,\n account=args.account,\n auth_type=args.auth_strategy, creds=creds,\n ca_cert=args.ca_certificate, timeout=args.timeout)\n except CannotAuthenticate, error:\n logger.error(error)\n if not args.auth_strategy:\n if 'RUCIO_AUTH_TYPE' in os.environ:\n auth_type = os.environ['RUCIO_AUTH_TYPE']\n else:\n try:\n auth_type = config_get('client', 'auth_type')\n except (NoOptionError, NoSectionError):\n logger.error('Cannot get AUTH_TYPE')\n sys.exit(FAILURE)\n if auth_type == 'x509_proxy':\n logger.error('Please verify that your proxy is still valid and renew it if needed.')\n sys.exit(FAILURE)\n return client",
"def make_rest_client(\n service_key, options=None,\n app_name=None, app_version=None, version=None,\n **kwargs):\n cloud = get_config(\n service_key=service_key, options=options,\n app_name=app_name, app_version=app_version,\n **kwargs)\n return cloud.get_session_client(service_key, version=version)",
"def client(self):\n\n if self._client is None:\n self._client = self._get_client()\n return self._client"
] | [
"0.69275284",
"0.69132006",
"0.6854978",
"0.6838644",
"0.67625606",
"0.6725256",
"0.6679172",
"0.65378565",
"0.6535458",
"0.6499919",
"0.64733833",
"0.6437994",
"0.6422018",
"0.6411563",
"0.6403961",
"0.63889176",
"0.6383493",
"0.6340762",
"0.6308783",
"0.6306034",
"0.6300599",
"0.62501276",
"0.62378615",
"0.6223984",
"0.620609",
"0.62012523",
"0.6102882",
"0.60905445",
"0.6082561",
"0.6077038"
] | 0.6965378 | 0 |
Factory method to create a new client from url and auth strategy. | def createClientFromUrl(url, authStrategy=None):
return MetaClient(MetaHttpClient(url, authStrategy)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def client():\n return Client(**common_data.AUTH_ARGS)",
"def make_client(self, context):\n return Client(self.settings['client_routing'], context=context)",
"def client_from_config(cls, base_client, conf, logger=None):\n _unused = conf\n if cls == PapiViewClient:\n # we're implementing this factory in the base-class, so we don't\n # have to copy-paste it into every single view. This means that\n # someone could invoke it in the abstract base, which does not make\n # any sense, so we have to catch it\n raise Exception(\"Factory must be called on a specific \"\n \"PapiViewClient subclass\")\n return cls(base_client, logger)",
"def _create_instance(cls, configuration, auth_type):\n auth = ClientAuthFactory.get(\n username=configuration.username,\n password=configuration.password,\n auth_type=auth_type\n )\n instance = HttpClient(configuration.url, auth)\n cls._INSTANCES[configuration] = instance\n return instance",
"def get_sli_client_factory(client_id, client_secret, api_url, callback_url): \n callback_path = urlparse(callback_url).path \n\n def sli_client_factory():\n return SLIClient(client_id, client_secret, api_url, callback_url)\n\n return (callback_path, sli_client_factory)",
"def view_with_client_from_config(cls, conf, config_section, logger=None):\n if cls == PapiViewClient:\n # we're implementing this factory in the base-class, so we don't\n # have to copy-paste it into every single view. This means that\n # someone could invoke it in the abstract base, which does not make\n # any sense, so we have to catch it\n raise Exception(\"Factory must be called on a specific \"\n \"PapiViewClient subclass\")\n\n base_client = papi_client.papi_client.PapiClientFactory.\\\n client_from_config(conf, config_section, logger)\n return cls.client_from_config(base_client, conf, logger)",
"def buildProtocol(self, addr):\n if hasattr(settings, \"DISCORD_SESSION_CLASS\"):\n protocol_class = class_from_module(\n settings.DISCORD_SESSION_CLASS, fallback=DiscordClient\n )\n protocol = protocol_class()\n else:\n protocol = DiscordClient()\n\n protocol.factory = self\n protocol.sessionhandler = self.sessionhandler\n return protocol",
"def _init_raw_client(self) -> None:\n if self.credentials:\n auth = HTTPBasicAuth(self.credentials['username'], self.credentials['password'])\n else:\n auth = None\n base_url = \"http://\" if self.untrusted else \"https://\"\n base_url += self.url\n self.raw_client = client.DockerRegistryClient(base_url=base_url, auth=auth)",
"def _get_auth_client(self, request):\n if self._auth_client is None:\n # Use PyFxa defaults if not specified\n server_url = fxa_conf(request, 'oauth_uri')\n auth_cache = self._get_cache(request)\n self._auth_client = OAuthClient(server_url=server_url, cache=auth_cache)\n\n return self._auth_client",
"def recreate_client(token=None):\n if token:\n # If we've successfully retrieved the token from the session (or have\n # been provided with a token), get authorization.\n auth = get_spotify_auth(token)\n # TODO make sure auth token uses this too\n auth.refresh_token_if_needed(app_config[\"SPOTIFY_AUTH\"][\"token_duration\"])\n return Client(auth, session.get(\"client_session\"))\n else:\n return None",
"def create_client(self) -> None:\n pass",
"def get_client():\n return Client(__address, authkey='strumamor')",
"def new(\n host: str = \"localhost\",\n port: int = 4110,\n user: str = \"pyserval\",\n passwd: str = \"pyserval\",\n ):\n connection = RestfulConnection(host=host, port=port, user=user, passwd=passwd)\n return LowLevelClient(connection=connection)",
"def make_client(\n host_url: str, email: str, password: str, timeout: float = 30\n) -> Client:\n if host_url.endswith(\"/\"):\n raise ValueError('host_url must not end with \"/\"')\n host_url += \"/api/v1\"\n return Client(\n host_url,\n headers=make_auth_headers(email, password),\n timeout=timeout,\n verify_ssl=False,\n raise_on_unexpected_status=True,\n )",
"def get_client_instance(cls, session, client_config, create=False):\n client = None\n if cls.SESSION_ID_KEY in session:\n client = session[cls.SESSION_ID_KEY]\n log.debug(\"Found OAuth client in session.\")\n if client is None and create:\n client = cls(client_config)\n session[cls.SESSION_ID_KEY] = client\n session.save()\n log.debug(\"No OAuth client in session - created new one.\")\n return client",
"def __init__(self, url = None, context = \"corbaserver\"):\n self._initOrb (url)\n self._makeClients (\"manipulation\", self.defaultClients, context)",
"def create_api_client(base_path, access_token):\n api_client = ApiClient()\n api_client.host = base_path\n api_client.set_default_header(header_name=\"Authorization\",\n header_value=f\"Bearer {access_token}\")\n return api_client",
"def __init__(self, client_auth_type, client_id, client_secret=None):\n self.client_auth_type = client_auth_type\n self.client_id = client_id\n self.client_secret = client_secret",
"def recreate_client_from_session():\n token = session.get(\"spotify_token\")\n return recreate_client(token)",
"def get_client(args):\n if args.auth_strategy == 'userpass':\n creds = {'username': args.username, 'password': args.password}\n else:\n creds = None\n\n try:\n client = Client(rucio_host=args.host, auth_host=args.auth_host,\n account=args.account,\n auth_type=args.auth_strategy, creds=creds,\n ca_cert=args.ca_certificate, timeout=args.timeout)\n except CannotAuthenticate, error:\n logger.error(error)\n if not args.auth_strategy:\n if 'RUCIO_AUTH_TYPE' in os.environ:\n auth_type = os.environ['RUCIO_AUTH_TYPE']\n else:\n try:\n auth_type = config_get('client', 'auth_type')\n except (NoOptionError, NoSectionError):\n logger.error('Cannot get AUTH_TYPE')\n sys.exit(FAILURE)\n if auth_type == 'x509_proxy':\n logger.error('Please verify that your proxy is still valid and renew it if needed.')\n sys.exit(FAILURE)\n return client",
"def __azure_client_factory(experiment_secrets: Secrets, client_name: str) -> object:\n secrets = load_secrets(experiment_secrets)\n with auth(secrets) as authentication:\n base_url = secrets.get('cloud').endpoints.resource_manager\n scopes = [base_url + \"/.default\"]\n client = eval(client_name)(\n credential=authentication,\n credential_scopes=scopes,\n base_url=base_url)\n\n return client",
"def twitter_factory(authenticator):\n client = None\n\n def _client():\n \"\"\" Creates and setups the twitter client if not already done.\"\"\"\n nonlocal client\n if not client:\n client = twitter.Twitter(authenticator)\n return client\n\n return _client # return the function that returns the client",
"def update_api_client(\n host,\n port,\n auth_type=REQUEST.AUTH_TYPE.BASIC,\n scheme=REQUEST.SCHEME.HTTPS,\n auth=None,\n):\n\n global _API_CLIENT_HANDLE\n\n update_connection_handle(host, port, auth_type, scheme=scheme, auth=auth)\n connection = get_connection_handle(host, port, auth_type, scheme, auth)\n _API_CLIENT_HANDLE = ClientHandle(connection)\n _API_CLIENT_HANDLE._connect()\n\n return _API_CLIENT_HANDLE",
"def _client(self) -> httpx.Client:\n return httpx.Client(\n base_url=self._base_url,\n headers=self._authorization_headers,\n proxies=self._proxies,\n )",
"def __init__(self, **kwargs):\r\n super(Client, self).__init__()\r\n self.httpclient = client.HTTPClient(**kwargs)\r\n self.version = '2.0'\r\n self.format = 'json'\r\n self.action_prefix = \"/v%s\" % (self.version)\r\n self.retries = 0\r\n self.retry_interval = 1",
"def __new__(cls, host=None, user=None, client=None):\n cls.__check_parameters(host=host, user=user)\n if client is None:\n raise InvalidClientException(\"Integrated Client during connection creation can't be None\")\n return super(Connection, cls).__new__(cls, host=host, user=user, client=client)",
"def __init__(self, client):\n\n self.client = client",
"def __init__(self, client):\n self.client = client",
"def _get_client(wsdl_url, cache_duration=(\"default\",)):\n global _suds_client\n\n print(wsdl_url)\n # Handle new or changed client request (create new client)\n if _suds_client is None or _suds_client.wsdl.url != wsdl_url:\n _suds_client = Client(wsdl_url)\n if cache_duration is None:\n _suds_client.set_options(cache=None)\n else:\n cache = _suds_client.options.cache\n # could add some error catching ...\n if cache_duration[0] == \"default\":\n cache.setduration(days=1)\n else:\n # noinspection PyTypeChecker\n cache.setduration(**dict([cache_duration]))\n\n return _suds_client",
"def make_oauth_client(base_url) -> requests.Session:\n config_file = os.path.join(os.environ['HOME'], '.allurarc')\n cp = ConfigParser()\n cp.read(config_file)\n\n REQUEST_TOKEN_URL = base_url + '/rest/oauth/request_token'\n AUTHORIZE_URL = base_url + '/rest/oauth/authorize'\n ACCESS_TOKEN_URL = base_url + '/rest/oauth/access_token'\n oauth_key = option(cp, base_url, 'oauth_key',\n 'Forge API OAuth Consumer Key (%s/auth/oauth/): ' % base_url)\n oauth_secret = option(cp, base_url, 'oauth_secret',\n 'Forge API Oauth Consumer Secret: ')\n\n try:\n oauth_token = cp.get(base_url, 'oauth_token')\n oauth_token_secret = cp.get(base_url, 'oauth_token_secret')\n except NoOptionError:\n oauthSess = OAuth1Session(oauth_key, client_secret=oauth_secret, callback_uri='oob')\n request_token = oauthSess.fetch_request_token(REQUEST_TOKEN_URL)\n pin_url = oauthSess.authorization_url(AUTHORIZE_URL, request_token['oauth_token'])\n if isinstance(webbrowser.get(), webbrowser.GenericBrowser):\n print(\"Go to %s\" % pin_url)\n else:\n webbrowser.open(pin_url)\n oauth_verifier = input('What is the PIN? ')\n access_token = oauthSess.fetch_access_token(ACCESS_TOKEN_URL, oauth_verifier)\n oauth_token = access_token['oauth_token']\n oauth_token_secret = access_token['oauth_token_secret']\n\n cp.set(base_url, 'oauth_token', oauth_token)\n cp.set(base_url, 'oauth_token_secret', oauth_token_secret)\n # save oauth token for later use\n cp.write(open(config_file, 'w'))\n print(f'Saving oauth tokens in {config_file} for later re-use')\n print()\n\n else:\n oauthSess = OAuth1Session(oauth_key, client_secret=oauth_secret,\n resource_owner_key=oauth_token, resource_owner_secret=oauth_token_secret)\n\n return oauthSess"
] | [
"0.6883496",
"0.6652142",
"0.65504664",
"0.63140374",
"0.62927",
"0.6198969",
"0.61564",
"0.6089419",
"0.6076771",
"0.60702914",
"0.60631484",
"0.60540426",
"0.6042376",
"0.6015032",
"0.5999247",
"0.5936448",
"0.5922538",
"0.59159464",
"0.58842707",
"0.5857151",
"0.58331627",
"0.58257943",
"0.5824407",
"0.5822359",
"0.5798051",
"0.5777847",
"0.57650816",
"0.57611924",
"0.5756147",
"0.57482105"
] | 0.74867505 | 0 |
Function for loading the features and labels associated with the training dataset. | def _loadTrain(self, features, labels):
self.trainX_, self.trainY_, self.trainLabel_ = self.__load(features, labels) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_features(self, features):\n pass\n # self.features = features",
"def read_data(feature_file, label_file):",
"def train(self, features, labels):\n pass",
"def _read_train_datas(self):\r\n with open(self.train_label_path, 'r') as fb:\r\n lines = fb.readlines()\r\n return self._parse_raw_labels(lines)",
"def load_features_labels(self):\n MFCCs = torch.from_numpy(np.load(self.feature_file))\n labels = torch.from_numpy(np.load(self.label_file))\n 'Loading from files finished!'\n return MFCCs.view(-1,1,128,128), labels.long()",
"def load_data():\n\n training_files_dir = \"digits/trainingDigits\"\n training_files = os.listdir(training_files_dir)\n file_num = len(training_files)\n hw_labels = []\n\n training_mat = zeros((file_num, 32 * 32))\n for i in xrange(file_num):\n filename = training_files[i]\n file_label = int((filename.split(\".\")[0]).split(\"_\")[0])\n hw_labels.append(file_label)\n training_mat[i, :] = img2vector(training_files_dir + '/' + filename)\n\n return training_mat, hw_labels",
"def load_data(label_name='Species'):\n\n # Create a local copy of the training set.\n train_path = tf.keras.utils.get_file(fname=TRAIN_URL.split('/')[-1],\n origin=TRAIN_URL)\n # train_path now holds the pathname: (训练集和测试集路径) ~/.keras/datasets/iris_training.csv\n\n # Parse the local CSV file.(解析)\n train = pd.read_csv(filepath_or_buffer=train_path,\n names=CSV_COLUMN_NAMES, # list of column names\n header=0 # ignore the first row of the CSV file.\n )\n # train now holds a pandas DataFrame, which is data structure\n # analogous to a table.\n\n # 1. Assign the DataFrame's labels (the right-most column) to train_label.\n # 2. Delete (pop) the labels from the DataFrame.\n # 3. Assign the remainder of the DataFrame to train_features\n print(\"-\")\n train_features, train_label = train, train.pop(label_name)\n\n # Apply the preceding logic to the test set.\n test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)\n test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)\n test_features, test_label = test, test.pop(label_name)\n\n # Return four DataFrames.\n return (train_features, train_label), (test_features, test_label)",
"def _load_training_data(self):\n self._save_training_data()",
"def _loadTest(self, features, labels):\n\t\tself.testX_, self.testY_, self.testLabel_ = self.__load(features, labels)",
"def load_data(self):\n sets = ['train', 'val']\n images = []\n labels = []\n self.labels_dic = {}\n file = open(self.path + 'wnids.txt')\n train_labels = file.read().split()\n if self.train:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n for i in os.listdir(self.path + 'train/' + f + '/images/'):\n images.append(Image.open(self.path + 'train/' + f + '/images/' + i))\n labels.append(f)\n #image label n link to folder names of TinyImageNet\n self.labels_dic[f] = fn\n\n else:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n self.labels_dic[f] = fn\n file_val = open(self.path + 'val/val_annotations.txt')\n val_labels = file_val.read().split('\\n')\n for im in val_labels:\n im_data = im.split(\"\t\")[:2]\n if len(im_data) < 2:\n continue\n if im_data[1] in self.labels_dic:\n images.append(Image.open(self.path + 'val/images/' + im_data[0]))\n labels.append(im_data[1])\n\n self.images = images\n self.labels = labels",
"def load_data(path):\n\n\t# Create a list of all files ending in .jpg\n\tim_list = list_images(path, '.jpg')\n\n\t# Create labels\n\tlabels = [int(im_name.split('/')[-1][0]) for im_name in im_list]\n\tfeatures = []\n\n\t# Create features from the images\n\t# TOD.O: iterate over images paths\n\tfor im_path in im_list:\n\t\t# TOD.O: load image as a gray level image\n\t\tim = np.array(Image.open(im_path).convert('L'))\n\t\t# TOD.O: process the image to remove borders and resize\n\t\tim = process_image(im)\n\t\t# TOD.O: append extracted features to the a list\n\t\tfeatures.append(extract_features(im))\n\n\t# TOD.O: return features, and labels\n\treturn features, labels",
"def prepare_data(self):\n if not os.path.exists(self.hparams.data_cache_dir):\n os.mkdir(self.hparams.data_cache_dir)\n for mode, filepath in zip(['train', 'val', 'test'],\n [self.hparams.train_path, self.hparams.val_path, self.hparams.test_path]):\n if mode == 'train':\n label_mode = self.hparams.label_mode\n else:\n label_mode = 'major'\n cached_features_file = self._feature_file(mode, label_mode)\n\n if not os.path.exists(cached_features_file):\n logger.info('Creating features from dataset file at %s', filepath)\n examples = read_examples_from_file(filepath, mode, label_mode)\n features = convert_examples_to_features(\n examples,\n self.labels,\n self.hparams.max_seq_length,\n self.tokenizer,\n cls_token_at_end=bool(self.hparams.model_type in ['xlnet']),\n cls_token=self.tokenizer.cls_token,\n cls_token_segment_id=2 if self.hparams.model_type in ['xlnet'] else 0,\n sep_token=self.tokenizer.sep_token,\n sep_token_extra=bool(self.hparams.model_type in ['roberta']),\n pad_on_left=bool(self.hparams.model_type in ['xlnet']),\n pad_token=self.tokenizer.convert_tokens_to_ids([self.tokenizer.pad_token])[0],\n pad_token_segment_id=4 if self.hparams.model_type in ['xlnet'] else 0,\n pad_token_label_id=self.pad_token_label_id,\n )\n logger.info('Saving features into cached file %s', cached_features_file)\n torch.save(features, cached_features_file)",
"def load_data_and_labels(data_file=train_file):\n \"\"\"\n There are 7 categories - \n 1. DEMO\n 2. DISE\n 3. TRMT\n 4. GOAL\n 5. PREG\n 6. FMLY\n 7. SOCL\n \"\"\"\n d = {}\n d['DEMO'] = [1, 0, 0, 0, 0, 0, 0]\n d['DISE'] = [0, 1, 0, 0, 0, 0, 0]\n d['TRMT'] = [0, 0, 1, 0, 0, 0, 0]\n d['GOAL'] = [0, 0, 0, 1, 0, 0, 0]\n d['PREG'] = [0, 0, 0, 0, 1, 0, 0]\n d['FAML'] = [0, 0, 0, 0, 0, 1, 0]\n d['SOCL'] = [0, 0, 0, 0, 0, 0, 1]\n\n max_len = -1\n\n #Load data from files\n samples = []\n with open(data_file, 'rb') as csvfile:\n spamreader = csv.reader(csvfile, delimiter='\\t', quotechar='|')\n for i, row in enumerate(spamreader):\n if (row[0] == \"Category\"):\n continue\n print (i, row[1])\n #samples.append([row[0], row[2]])\n #getting class and title = row[0] and row[1] respectively\n samples.append([row[1], row[2], row[0]])\n #split by words\n\n return samples",
"def load_data(trainfile, testfile):\n raw_train = pd.read_csv(trainfile, header=None)\n raw_test = pd.read_csv(testfile, header=None)\n train = raw_train.values\n test = raw_test.values\n train_features = train[0::, 1::]\n train_label = train[::, 0]\n test_features = test[0::, 1::]\n test_label = test[::, 0]\n train, cv , train_label, cv_label = train_test_split(train_features,train_label, test_size=0.33, random_state=42)\n return train, train_label, \\\n cv, cv_label, \\\n test_features, test_label",
"def train(self, features, labels):\n self.train_features = features\n self.train_labels = labels\n #raise NotImplementedError",
"def load_labels():\n filename = os.path.join(config['inference']['model_dir'], 'output_labels.txt')\n global labels\n labels = [line.rstrip() for line in tf.gfile.FastGFile(filename)]",
"def load_features(self, dataset, G, embeddings):\n feature_num = dataset.num_features\n node_num = dataset[0].y.shape[0]\n features_matrix = np.zeros(node_num, feature_num)\n for vertex, node in enumerate(G.nodes()):\n features_matrix[node] = embeddings[vertex]",
"def load_test_data(label_fname, data_fname):\n labels = load_csv(label_fname)\n data = load_csv(data_fname, 'excel-tab')\n\n # Join all data together on the ids given in the files\n joined_data = {}\n for label in labels:\n id = label[0]\n joined_data[id] = {'class': label[1]}\n for rec in data:\n id = rec[0]\n if id in joined_data:\n joined_data[id]['data'] = rec[1]\n\n # Clean and convert the data to reals\n max_features = 0\n for id in joined_data:\n words = clean_text(joined_data[id]['data'])\n reals = convert_to_reals(words)\n joined_data[id]['data'] = reals\n if len(reals) > max_features:\n max_features = len(reals)\n\n # Pad the data\n for id in joined_data:\n reals = joined_data[id]['data']\n joined_data[id]['data'] = reals + (max_features - len(reals)) * [0.0]\n\n # Prepare the data for training\n training_data = np.array([joined_data[id]['data'] for id in joined_data])\n training_labels = [joined_data[id]['class'] == 'OFF' for id in joined_data]\n return training_labels, training_data, max_features",
"def load_data(self):\n with open('data/fordTrain.csv') as f:\n data = csv.reader(f, delimiter=',')\n train = [x for i, x in enumerate(data) if i > 0] \n # Extract features and target variable separately\n trainx = [x[3:] for x in train]\n trainy = [x[2] for x in train]\n\n with open('data/fordTest.csv') as f:\n data = csv.reader(f, delimiter=',')\n testx = [x[3:] for i, x in enumerate(data) if i > 0] \n\n with open('data/Solution.csv') as f:\n data = csv.reader(f, delimiter=',')\n testy = [x[2] for i, x in enumerate(data) if i > 0] \n\n # Extract features and target variable, convert to numpy array\n trainx = np.asarray(trainx, dtype=np.float32)\n trainy = np.asarray(trainy, dtype=np.int8)\n testx = np.asarray(testx, dtype=np.float32)\n testy = np.asarray(testy, dtype=np.int8)\n\n # Return training and test sets\n trainSet = Dataset(trainx, trainy)\n testSet = Dataset(testx, testy)\n return trainSet, testSet",
"def load_for_sklearn(self):\n\n labels = [] # string labels\n examples = [] # examples as strings\n\n # document number -> label mapping\n doc2label = n2b2.map_patients_to_labels(\n self.xml_dir,\n self.category)\n\n for f in os.listdir(self.cui_dir):\n doc_id = f.split('.')[0]\n file_path = os.path.join(self.cui_dir, f)\n file_as_string = open(file_path).read()\n\n string_label = doc2label[doc_id]\n int_label = LABEL2INT[string_label]\n labels.append(int_label)\n examples.append(file_as_string)\n\n return examples, labels",
"def load_features_predictors():\n pwd = \"./data/\"\n if __name__ == \"__main__\":\n pwd = \".\" + pwd\n else:\n pass\n\n fn1 = os.path.join(pwd, \"features.npy\")\n fn2 = os.path.join(pwd, \"predictors.npy\")\n\n X = np.load(fn1)\n y = np.load(fn2)\n return X, y",
"def load_data(self) -> tuple:\n label_num = {}\n data_set = pathlib.Path(self.path)\n data = []\n\n # create the label lookup dict for verifcation later\n for i, v in enumerate(data_set.iterdir()):\n label_num[v.name] = i\n self.labels[i] = v.name\n # end\n\n # read images\n for img_path in data_set.rglob(\"*.jpg\"):\n lbl = label_num[str(img_path.parent.stem)]\n img = cv2.imread(str(img_path))\n img = cv2.resize(img, self.dims, interpolation=cv2.INTER_AREA)\n\n # flatten RGB data into a vector\n # NOTE: NOT ACTUALLY NECESSARY! \n img.flatten()\n\n # label the sample and append to temp data list\n sample = np.append(lbl, img)\n data.append(sample)\n # end\n\n # partition and package the data (*_ ensures safe unpacking)\n train, test, validate, *_ = Data.partition(data, self.parts, 0.7, 0.2)\n self.train = Data(train)\n self.test = Data(test)\n self.validate = Data(validate)",
"def load_dataset():\n temp = gzip.open('mnist.pkl.gz')\n train, val , test = pickle.load(temp,encoding='latin1')\n temp.close()\n train_inp = [np.reshape(x, (784,1)) for x in train[0]]\n train_outp = [one_hot(y) for y in train[1]]\n training_data = zip(train_inp, train_outp)\n validation_inp = [np.reshape(x, (784, 1)) for x in val[0]]\n validation_data = zip(validation_inp, val[1])\n test_inp = [np.reshape(x, (784, 1)) for x in test[0]]\n test_data = zip(test_inp, test[1])\n return (training_data,validation_data,test_data)",
"def load_data_and_labels():\n # Load data from files\n positive_examples = []\n for file in os.listdir('with_datarace'):\n filename = os.fsdecode(file)\n ast_file = open('with_datarace\\\\' + filename, 'r')\n token_vector = ast_file.read()\n positive_examples.append(token_vector)\n file_names.append(filename)\n\n negative_examples = []\n for file in os.listdir('without_datarace\\\\'):\n filename = os.fsdecode(file)\n ast_file = open('without_datarace\\\\' + filename, 'r')\n token_vector = ast_file.read()\n negative_examples.append(token_vector) # List of lists\n file_names.append(filename)\n\n positive_examples = [s.strip() for s in positive_examples]\n negative_examples = [s.strip() for s in negative_examples]\n\n # Split by words\n x_text = positive_examples + negative_examples # why we didn't cobine it from the beginning?\n x_text = [clean_str(sent) for sent in x_text]\n x_text = [s.split(\" \") for s in x_text]\n\n # Generate labels\n positive_labels = [[0, 1] for _ in positive_examples]\n negative_labels = [[1, 0] for _ in negative_examples]\n y = np.concatenate([positive_labels, negative_labels], 0)\n\n return [x_text, y]",
"def _load_training_and_test_sets(normalize):\n class_labels = []\n test_labels = []\n norm = None\n if normalize == True:\n norm = loading.get_normalize_vector()\n\n for i in range(0, 10):\n [training, test] = loading.load_number_set(i, 0.7, norm_vector=norm)\n labels = [str(i)] * training.shape[0]\n tlabels = [str(i)] * test.shape[0]\n if i == 0:\n train_points = training\n test_points = test\n else:\n train_points = np.concatenate((train_points, training), axis = 0)\n test_points = np.concatenate((test_points, test), axis = 0)\n class_labels.extend(labels)\n test_labels.extend(tlabels)\n\n return train_points, test_points, class_labels, test_labels",
"def load_data(self):\n\n self._load_train_data()\n self._load_test_data()",
"def load_training_data(fname):\n all_data = load_csv(fname, 'excel-tab')\n\n labels = [rec[2] == 'OFF' for rec in all_data]\n data = [convert_to_reals(clean_text(rec[1])) for rec in all_data]\n max_features = max([len(rec) for rec in data])\n\n # Pad the data\n for rec in data:\n rec.extend([0.0] * (max_features - len(rec)))\n\n return labels, data, max_features",
"def load_data():\n # Load and preprocess data\n sentences, labels = load_data_and_labels()\n sentences_padded = pad_sentences(sentences)\n vocabulary, vocabulary_inv = build_vocab(sentences_padded)\n x, y = build_input_data(sentences_padded, labels, vocabulary)\n return [x, y, vocabulary, vocabulary_inv]",
"def load_training_set():\n global training_set\n f = gzip.open('mnist.pkl.gz', 'rb')\n train, valid, test = cPickle.load(f)\n [training_set, training_labels] = train\n [validation_set, validation_labels] = valid\n [testing_set, testing_labels] = test\n training_set = np.concatenate((training_set, validation_set))\n f.close()\n np.random.shuffle(training_set)",
"def __init__(self, data_filename):\n with open(data_filename, 'rb') as data_file:\n loaded_features = pickle.load(data_file)\n self.title_nlp_tfidf_features = loaded_features['title_NLP_TFIDF_features']\n self.other_features = loaded_features['other_features']\n self.category1_features = loaded_features['category1_features']\n self.category2_features = loaded_features['category2_features']\n self.category3_features = loaded_features['category3_features']\n self.material_features = loaded_features['material_features']\n self.who_made_features = loaded_features['whoMade_features']\n self.when_made_features = loaded_features['whenMade_features']\n self.style1_features = loaded_features['style1_features']\n self.style2_features = loaded_features['style2_features']\n self.feature_labels = loaded_features['feature_labels']"
] | [
"0.72724336",
"0.71050906",
"0.70961773",
"0.7020884",
"0.6974502",
"0.69578195",
"0.68213254",
"0.68110377",
"0.68032",
"0.67908686",
"0.6778066",
"0.6760534",
"0.67411566",
"0.6715979",
"0.66920966",
"0.66892487",
"0.66885465",
"0.66482824",
"0.6603724",
"0.65990937",
"0.6596825",
"0.65963936",
"0.65901184",
"0.6585779",
"0.65728533",
"0.6556432",
"0.65537137",
"0.6547047",
"0.6525673",
"0.6523202"
] | 0.7857445 | 0 |
Function for loading the features and labels associated with the testing dataset. | def _loadTest(self, features, labels):
self.testX_, self.testY_, self.testLabel_ = self.__load(features, labels) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_test_data(label_fname, data_fname):\n labels = load_csv(label_fname)\n data = load_csv(data_fname, 'excel-tab')\n\n # Join all data together on the ids given in the files\n joined_data = {}\n for label in labels:\n id = label[0]\n joined_data[id] = {'class': label[1]}\n for rec in data:\n id = rec[0]\n if id in joined_data:\n joined_data[id]['data'] = rec[1]\n\n # Clean and convert the data to reals\n max_features = 0\n for id in joined_data:\n words = clean_text(joined_data[id]['data'])\n reals = convert_to_reals(words)\n joined_data[id]['data'] = reals\n if len(reals) > max_features:\n max_features = len(reals)\n\n # Pad the data\n for id in joined_data:\n reals = joined_data[id]['data']\n joined_data[id]['data'] = reals + (max_features - len(reals)) * [0.0]\n\n # Prepare the data for training\n training_data = np.array([joined_data[id]['data'] for id in joined_data])\n training_labels = [joined_data[id]['class'] == 'OFF' for id in joined_data]\n return training_labels, training_data, max_features",
"def loadTest(yObject,yMatch,features):\n\n nf = []\n for f in features:\n if f != 'label':\n nf.append(f)\n \n print 'Train features: {}'.format(features)\n print 'Test features: {}'.format(nf)\n \n # load test subject data, save as attribtues\n tObject = ld.loadH5(yObject,*['full'])\n ID = tObject.attrs['ID']\n\n parsedData = ld.parseH5(tObject,nf)\n tObject.close()\n\n data = parsedData[ID]\n mtd = cu.mergeFeatures(data,nf)\n\n threshed = ld.loadMat(yMatch)\n\n ltvm = cu.vertexMemberships(threshed,180)\n\n return [threshed,mtd,ltvm]",
"def load_data(trainfile, testfile):\n raw_train = pd.read_csv(trainfile, header=None)\n raw_test = pd.read_csv(testfile, header=None)\n train = raw_train.values\n test = raw_test.values\n train_features = train[0::, 1::]\n train_label = train[::, 0]\n test_features = test[0::, 1::]\n test_label = test[::, 0]\n train, cv , train_label, cv_label = train_test_split(train_features,train_label, test_size=0.33, random_state=42)\n return train, train_label, \\\n cv, cv_label, \\\n test_features, test_label",
"def load_data(label_name='Species'):\n\n # Create a local copy of the training set.\n train_path = tf.keras.utils.get_file(fname=TRAIN_URL.split('/')[-1],\n origin=TRAIN_URL)\n # train_path now holds the pathname: (训练集和测试集路径) ~/.keras/datasets/iris_training.csv\n\n # Parse the local CSV file.(解析)\n train = pd.read_csv(filepath_or_buffer=train_path,\n names=CSV_COLUMN_NAMES, # list of column names\n header=0 # ignore the first row of the CSV file.\n )\n # train now holds a pandas DataFrame, which is data structure\n # analogous to a table.\n\n # 1. Assign the DataFrame's labels (the right-most column) to train_label.\n # 2. Delete (pop) the labels from the DataFrame.\n # 3. Assign the remainder of the DataFrame to train_features\n print(\"-\")\n train_features, train_label = train, train.pop(label_name)\n\n # Apply the preceding logic to the test set.\n test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)\n test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)\n test_features, test_label = test, test.pop(label_name)\n\n # Return four DataFrames.\n return (train_features, train_label), (test_features, test_label)",
"def read_data(feature_file, label_file):",
"def _loadTrain(self, features, labels):\n\t\tself.trainX_, self.trainY_, self.trainLabel_ = self.__load(features, labels)",
"def load_test_data():\n X = []\n y = []\n for fname in os.listdir(test_dir):\n label = int(fname.split(\"_\")[0])\n img = plt.imread(os.path.join(test_dir, fname))\n X.append(img)\n y.append(label)\n X = np.stack(X)\n y = np.stack(y)\n return X, y",
"def load_features(self, features):\n pass\n # self.features = features",
"def read(train_path, test_path, label_name):\n train_dataset = pd.read_csv(train_path)\n test_dataset = pd.read_csv(test_path)\n\n train_labels = train_dataset.pop(label_name)\n\n imputer = DataFrameImputer().fit(train_dataset)\n train_dataset = imputer.transform(train_dataset)\n test_dataset = imputer.transform(test_dataset)\n\n train_dataset = pd.get_dummies(train_dataset)\n test_dataset = pd.get_dummies(test_dataset)\n\n train_dataset = train_dataset.drop(train_dataset.columns.difference(test_dataset.columns), axis=1)\n test_dataset = test_dataset.drop(test_dataset.columns.difference(train_dataset.columns), axis=1)\n\n scaler = StandardScaler().fit(train_dataset)\n train_dataset = scaler.transform(train_dataset)\n test_dataset = scaler.transform(test_dataset)\n\n return train_dataset, train_labels, test_dataset",
"def load_test_dataset(self):\n test_data_path = \"testdata\"\n root = Path(test_data_path)\n classes = sorted([j.name.split('/')[-1] for j in root.iterdir()])\n print(classes)\n\n transform = transforms.Compose([\n transforms.Resize(300),\n transforms.RandomHorizontalFlip(),\n transforms.CenterCrop(250),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.6071, 0.4828, 0.3934], std=[0.2845, 0.3187, 0.3240])\n ])\n\n dataset = datasets.ImageFolder(test_data_path, transform=transform)\n testloader = DataLoader(dataset, batch_size=4, shuffle=True)\n print(\"Loaded data\")\n return testloader",
"def load_data():\r\n train_dataset = h5py.File('train_catvnoncat.h5', \"r\") # Change the directory as per your system\r\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\r\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\r\n\r\n test_dataset = h5py.File('test_catvnoncat.h5', \"r\") # Change the directory as per your system\r\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\r\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\r\n\r\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\r\n \r\n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\r\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\r\n \r\n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes",
"def load_for_sklearn(self):\n\n labels = [] # string labels\n examples = [] # examples as strings\n\n # document number -> label mapping\n doc2label = n2b2.map_patients_to_labels(\n self.xml_dir,\n self.category)\n\n for f in os.listdir(self.cui_dir):\n doc_id = f.split('.')[0]\n file_path = os.path.join(self.cui_dir, f)\n file_as_string = open(file_path).read()\n\n string_label = doc2label[doc_id]\n int_label = LABEL2INT[string_label]\n labels.append(int_label)\n examples.append(file_as_string)\n\n return examples, labels",
"def load_dataset():\n\n train_dataset = h5py.File('datasets/train_catvnoncat.h5', \"r\")\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\n\n test_dataset = h5py.File('datasets/test_catvnoncat.h5', \"r\")\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\n\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\n \n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\n \n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes",
"def load_data(self):\n with open('data/fordTrain.csv') as f:\n data = csv.reader(f, delimiter=',')\n train = [x for i, x in enumerate(data) if i > 0] \n # Extract features and target variable separately\n trainx = [x[3:] for x in train]\n trainy = [x[2] for x in train]\n\n with open('data/fordTest.csv') as f:\n data = csv.reader(f, delimiter=',')\n testx = [x[3:] for i, x in enumerate(data) if i > 0] \n\n with open('data/Solution.csv') as f:\n data = csv.reader(f, delimiter=',')\n testy = [x[2] for i, x in enumerate(data) if i > 0] \n\n # Extract features and target variable, convert to numpy array\n trainx = np.asarray(trainx, dtype=np.float32)\n trainy = np.asarray(trainy, dtype=np.int8)\n testx = np.asarray(testx, dtype=np.float32)\n testy = np.asarray(testy, dtype=np.int8)\n\n # Return training and test sets\n trainSet = Dataset(trainx, trainy)\n testSet = Dataset(testx, testy)\n return trainSet, testSet",
"def train(self, features, labels):\n pass",
"def load_data():\n X = load_pickle(config['image_paths']['train_images_pickle'])\n y = load_train_labels()\n y = to_categorical(y)\n test_indices = np.random.choice(len(X), int(len(X) * float(config['model']['test_size'])), replace=False)\n X_train = np.asarray([e for idx, e in enumerate(X) if idx not in test_indices])\n X_test = np.asarray([e for idx, e in enumerate(X) if idx in test_indices])\n y_train = np.asarray([e for idx, e in enumerate(y) if idx not in test_indices])\n y_test = np.asarray([e for idx, e in enumerate(y) if idx in test_indices])\n return X_train, y_train, X_test, y_test",
"def _load_training_and_test_sets(normalize):\n class_labels = []\n test_labels = []\n norm = None\n if normalize == True:\n norm = loading.get_normalize_vector()\n\n for i in range(0, 10):\n [training, test] = loading.load_number_set(i, 0.7, norm_vector=norm)\n labels = [str(i)] * training.shape[0]\n tlabels = [str(i)] * test.shape[0]\n if i == 0:\n train_points = training\n test_points = test\n else:\n train_points = np.concatenate((train_points, training), axis = 0)\n test_points = np.concatenate((test_points, test), axis = 0)\n class_labels.extend(labels)\n test_labels.extend(tlabels)\n\n return train_points, test_points, class_labels, test_labels",
"def load_datasets(args, train_test_split=0):\n logger.info(\"Loading data...\")\n df_data_path = \"./data/df_data.pkl\"\n graph_path = \"./data/text_graph.pkl\"\n if not os.path.isfile(df_data_path) or not os.path.isfile(graph_path):\n logger.info(\"Building datasets and graph from raw data... Note this will take quite a while...\")\n generate_text_graph(args.train_data, args.infer_data, args.max_vocab_len)\n df_data = load_pickle(\"df_data.pkl\")\n G_dict = load_pickle(\"text_graph.pkl\")\n G = G_dict[\"graph\"]\n \n if train_test_split == 0:\n infer_idx_start = G_dict[\"infer_idx_start\"]\n del G_dict\n \n logger.info(\"Building adjacency and degree matrices...\")\n A = nx.to_numpy_matrix(G, weight=\"weight\"); A = A + np.eye(G.number_of_nodes())\n degrees = []\n for d in G.degree(weight=None):\n if d == 0:\n degrees.append(0)\n else:\n degrees.append(d[1]**(-0.5))\n degrees = np.diag(degrees)\n X = np.eye(G.number_of_nodes()) # Features are just identity matrix\n A_hat = degrees@A@degrees\n f = X # (n X n) X (n X n) x (n X n) X (n X n) input of net\n \n if train_test_split == 1:\n logger.info(\"Splitting labels for training and inferring...\")\n ### stratified test samples\n test_idxs = []\n for b_id in df_data[\"label\"].unique():\n dum = df_data[df_data[\"label\"] == b_id]\n if len(dum) >= 4:\n test_idxs.extend(list(np.random.choice(dum.index, size=round(args.test_ratio*len(dum)), replace=False)))\n save_as_pickle(\"test_idxs.pkl\", test_idxs)\n # select only certain labelled nodes for semi-supervised GCN\n selected = []\n for i in range(len(df_data)):\n if i not in test_idxs:\n selected.append(i)\n save_as_pickle(\"selected.pkl\", selected)\n else:\n logger.info(\"Preparing training labels...\")\n test_idxs = [i for i in range(infer_idx_start, len(df_data))]\n selected = [i for i in range(infer_idx_start)]\n save_as_pickle(\"selected.pkl\", selected)\n save_as_pickle(\"test_idxs.pkl\", test_idxs)\n \n f_selected = f[selected]; f_selected = torch.from_numpy(f_selected).float()\n f_not_selected = f[test_idxs]; f_not_selected = torch.from_numpy(f_not_selected).float()\n labels_selected = list(df_data.loc[selected]['label'])\n if train_test_split == 1: \n labels_not_selected = list(df_data.loc[test_idxs]['label'])\n else:\n labels_not_selected = []\n \n f = torch.from_numpy(f).float()\n save_as_pickle(\"labels_selected.pkl\", labels_selected)\n save_as_pickle(\"labels_not_selected.pkl\", labels_not_selected)\n logger.info(\"Split into %d train and %d test lebels.\" % (len(labels_selected), len(labels_not_selected)))\n return f, X, A_hat, selected, labels_selected, labels_not_selected, test_idxs",
"def load_label_data(config):\n label_data = pd.read_csv(config.LabelDataConfig.data_path)\n ids = list(label_data['Training cases final'])\n labels = config.build_labels(label_data)\n\n train_ids, val_ids, train_labels, val_labels = train_test_split(\n ids,\n labels,\n stratify=labels,\n train_size=config.ImageDataConfig.train_percent)\n\n train_label_data = {image_id.upper(): label\n for image_id, label in izip(train_ids, train_labels)}\n val_label_data = {image_id.upper(): label\n for image_id, label in izip(val_ids, val_labels)}\n\n return train_label_data, val_label_data",
"def load(train_file, test_file):\n print('\\nLoad the raw training and test set data...')\n y_train, tx_train, ids_train = load_csv_data(train_file)\n y_test, tx_test, ids_test = load_csv_data(test_file)\n print('\\n... finished.')\n return y_train, tx_train, ids_train, y_test, tx_test, ids_test",
"def load_data(y_name='Species'):\n train_path = tf.keras.utils.get_file(args.TRAIN_URL.split('/')[-1], args.TRAIN_URL)\n test_path = tf.keras.utils.get_file(args.TEST_URL.split('/')[-1], args.TEST_URL)\n\n train = pd.read_csv(train_path, names=args.CSV_COLUMN_NAMES, header=0)\n train_x, train_y = train, train.pop(y_name)\n\n test = pd.read_csv(test_path, names=args.CSV_COLUMN_NAMES, header=0)\n test_x, test_y = test, test.pop(y_name)\n\n return (train_x, train_y), (test_x, test_y)",
"def load_data_and_labels(data_file=train_file):\n \"\"\"\n There are 7 categories - \n 1. DEMO\n 2. DISE\n 3. TRMT\n 4. GOAL\n 5. PREG\n 6. FMLY\n 7. SOCL\n \"\"\"\n d = {}\n d['DEMO'] = [1, 0, 0, 0, 0, 0, 0]\n d['DISE'] = [0, 1, 0, 0, 0, 0, 0]\n d['TRMT'] = [0, 0, 1, 0, 0, 0, 0]\n d['GOAL'] = [0, 0, 0, 1, 0, 0, 0]\n d['PREG'] = [0, 0, 0, 0, 1, 0, 0]\n d['FAML'] = [0, 0, 0, 0, 0, 1, 0]\n d['SOCL'] = [0, 0, 0, 0, 0, 0, 1]\n\n max_len = -1\n\n #Load data from files\n samples = []\n with open(data_file, 'rb') as csvfile:\n spamreader = csv.reader(csvfile, delimiter='\\t', quotechar='|')\n for i, row in enumerate(spamreader):\n if (row[0] == \"Category\"):\n continue\n print (i, row[1])\n #samples.append([row[0], row[2]])\n #getting class and title = row[0] and row[1] respectively\n samples.append([row[1], row[2], row[0]])\n #split by words\n\n return samples",
"def load_features_labels(self):\n MFCCs = torch.from_numpy(np.load(self.feature_file))\n labels = torch.from_numpy(np.load(self.label_file))\n 'Loading from files finished!'\n return MFCCs.view(-1,1,128,128), labels.long()",
"def load_data(self):\n sets = ['train', 'val']\n images = []\n labels = []\n self.labels_dic = {}\n file = open(self.path + 'wnids.txt')\n train_labels = file.read().split()\n if self.train:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n for i in os.listdir(self.path + 'train/' + f + '/images/'):\n images.append(Image.open(self.path + 'train/' + f + '/images/' + i))\n labels.append(f)\n #image label n link to folder names of TinyImageNet\n self.labels_dic[f] = fn\n\n else:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n self.labels_dic[f] = fn\n file_val = open(self.path + 'val/val_annotations.txt')\n val_labels = file_val.read().split('\\n')\n for im in val_labels:\n im_data = im.split(\"\t\")[:2]\n if len(im_data) < 2:\n continue\n if im_data[1] in self.labels_dic:\n images.append(Image.open(self.path + 'val/images/' + im_data[0]))\n labels.append(im_data[1])\n\n self.images = images\n self.labels = labels",
"def load_features_predictors():\n pwd = \"./data/\"\n if __name__ == \"__main__\":\n pwd = \".\" + pwd\n else:\n pass\n\n fn1 = os.path.join(pwd, \"features.npy\")\n fn2 = os.path.join(pwd, \"predictors.npy\")\n\n X = np.load(fn1)\n y = np.load(fn2)\n return X, y",
"def load_data():\r\n from sklearn.feature_extraction.text import CountVectorizer\r\n\r\n # Load the data\r\n\r\n with open(\"clean_real.txt\", 'r') as RealNews:\r\n RealStrAr = RealNews.read().split('\\n')\r\n\r\n with open(\"clean_fake.txt\", 'r') as FakeNews:\r\n FakeStrAr = FakeNews.read().split('\\n')\r\n\r\n # Preprocess it using a vectorizer\r\n\r\n MyCoolVectorizer = CountVectorizer()\r\n X = MyCoolVectorizer.fit_transform(RealStrAr + FakeStrAr)\r\n\r\n RealLabels = np.ones((len(RealStrAr), 1)) # means real\r\n FakeLabels = np.zeros((len(FakeStrAr), 1)) # means fake\r\n AllLabels = np.append(RealLabels, FakeLabels, axis=0)\r\n\r\n FinalTensor = np.append(X.toarray(), AllLabels, axis=1)\r\n\r\n # Randomize it and split it\r\n\r\n np.random.shuffle(FinalTensor)\r\n\r\n # divide and multiply by 2 just to make sure it's even\r\n ROUGHLY70 = 2 * ((FinalTensor.shape[0] * 70 / 100) / 2)\r\n ROUGHLY15 = (FinalTensor.shape[0] - ROUGHLY70) / 2\r\n\r\n # TEST SET VALIDATION SET TRAINING SET DICTIONARY\r\n return (FinalTensor[:ROUGHLY15], FinalTensor[ROUGHLY15 : 2 * ROUGHLY15], FinalTensor[-ROUGHLY70:], MyCoolVectorizer.get_feature_names())",
"def _read_labels(test_data=False):\n if not test_data:\n filename = os.path.join(FOLDER_PATH, 'train-labels.idx1-ubyte')\n else:\n filename = os.path.join(FOLDER_PATH, 't10k-labels.idx1-ubyte')\n if not os.path.exists(filename):\n raise ValueError('The file dose not exist.')\n \n # Create a queue that produces the filenames to read.\n filename_queue = tf.train.string_input_producer([filename])\n \n # The first 8 bytes contain file information:\n # [offset] [type] [value] [description]\n # 0000 32 bit integer 0x00000801(2049) magic number\n # 0004 32 bit integer 60000/10000 number of items \n # ...(label value)\n header_bytes = 8\n # Every record consists of a label, with a fixed number of bytes for each.\n record_bytes = 1\n \n # Create a FixedLengthRecordReader to read record.\n reader = tf.FixedLengthRecordReader(record_bytes=record_bytes,\n header_bytes=header_bytes)\n _, value = reader.read(filename_queue)\n\n # Convert from a string to a vector of uint8, then cast to int32.\n record = tf.cast(tf.decode_raw(value, tf.uint8), tf.int32)\n \n # Reshape from [1] to a scalar shape [].\n label = tf.reshape(record, [])\n\n return label",
"def load_data_and_labels():\n # Load data from files\n positive_examples = []\n for file in os.listdir('with_datarace'):\n filename = os.fsdecode(file)\n ast_file = open('with_datarace\\\\' + filename, 'r')\n token_vector = ast_file.read()\n positive_examples.append(token_vector)\n file_names.append(filename)\n\n negative_examples = []\n for file in os.listdir('without_datarace\\\\'):\n filename = os.fsdecode(file)\n ast_file = open('without_datarace\\\\' + filename, 'r')\n token_vector = ast_file.read()\n negative_examples.append(token_vector) # List of lists\n file_names.append(filename)\n\n positive_examples = [s.strip() for s in positive_examples]\n negative_examples = [s.strip() for s in negative_examples]\n\n # Split by words\n x_text = positive_examples + negative_examples # why we didn't cobine it from the beginning?\n x_text = [clean_str(sent) for sent in x_text]\n x_text = [s.split(\" \") for s in x_text]\n\n # Generate labels\n positive_labels = [[0, 1] for _ in positive_examples]\n negative_labels = [[1, 0] for _ in negative_examples]\n y = np.concatenate([positive_labels, negative_labels], 0)\n\n return [x_text, y]",
"def load_and_process(data_dir, train_node_num, eval_node_num, test_node_num):\n biases, feature, label = get_biases_features_labels(data_dir)\n # split training, validation and testing set\n nodes_num = label.shape[0]\n train_mask = get_mask(nodes_num, 0, train_node_num)\n eval_mask = get_mask(nodes_num, train_node_num, train_node_num + eval_node_num)\n test_mask = get_mask(nodes_num, nodes_num - test_node_num, nodes_num)\n\n y_train = np.zeros(label.shape)\n y_val = np.zeros(label.shape)\n y_test = np.zeros(label.shape)\n\n y_train[train_mask, :] = label[train_mask, :]\n y_val[eval_mask, :] = label[eval_mask, :]\n y_test[test_mask, :] = label[test_mask, :]\n\n y_train = y_train[np.newaxis]\n y_val = y_val[np.newaxis]\n y_test = y_test[np.newaxis]\n train_mask = train_mask[np.newaxis]\n eval_mask = eval_mask[np.newaxis]\n test_mask = test_mask[np.newaxis]\n\n return feature, biases, y_train, train_mask, y_val, eval_mask, y_test, test_mask",
"def load_dataset():\n temp = gzip.open('mnist.pkl.gz')\n train, val , test = pickle.load(temp,encoding='latin1')\n temp.close()\n train_inp = [np.reshape(x, (784,1)) for x in train[0]]\n train_outp = [one_hot(y) for y in train[1]]\n training_data = zip(train_inp, train_outp)\n validation_inp = [np.reshape(x, (784, 1)) for x in val[0]]\n validation_data = zip(validation_inp, val[1])\n test_inp = [np.reshape(x, (784, 1)) for x in test[0]]\n test_data = zip(test_inp, test[1])\n return (training_data,validation_data,test_data)"
] | [
"0.7340451",
"0.7329776",
"0.729219",
"0.70436066",
"0.70091707",
"0.6979865",
"0.6900357",
"0.68143857",
"0.67991424",
"0.67709094",
"0.67183954",
"0.6622276",
"0.6608396",
"0.6606673",
"0.65982616",
"0.657758",
"0.65768003",
"0.65713066",
"0.6517931",
"0.65098",
"0.65078866",
"0.6489712",
"0.64857554",
"0.6475307",
"0.6470055",
"0.6467912",
"0.645436",
"0.6449932",
"0.6449179",
"0.6440512"
] | 0.80802953 | 0 |
Function for loading the features and labels associated with the validation dataset. | def _loadValid(self, features, labels):
self.validX_, self.validY_, self.validLabel_ = self.__load(features, labels) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _loadTrain(self, features, labels):\n\t\tself.trainX_, self.trainY_, self.trainLabel_ = self.__load(features, labels)",
"def load_data(self):\n sets = ['train', 'val']\n images = []\n labels = []\n self.labels_dic = {}\n file = open(self.path + 'wnids.txt')\n train_labels = file.read().split()\n if self.train:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n for i in os.listdir(self.path + 'train/' + f + '/images/'):\n images.append(Image.open(self.path + 'train/' + f + '/images/' + i))\n labels.append(f)\n #image label n link to folder names of TinyImageNet\n self.labels_dic[f] = fn\n\n else:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n self.labels_dic[f] = fn\n file_val = open(self.path + 'val/val_annotations.txt')\n val_labels = file_val.read().split('\\n')\n for im in val_labels:\n im_data = im.split(\"\t\")[:2]\n if len(im_data) < 2:\n continue\n if im_data[1] in self.labels_dic:\n images.append(Image.open(self.path + 'val/images/' + im_data[0]))\n labels.append(im_data[1])\n\n self.images = images\n self.labels = labels",
"def load_data_and_labels(data_file=train_file):\n \"\"\"\n There are 7 categories - \n 1. DEMO\n 2. DISE\n 3. TRMT\n 4. GOAL\n 5. PREG\n 6. FMLY\n 7. SOCL\n \"\"\"\n d = {}\n d['DEMO'] = [1, 0, 0, 0, 0, 0, 0]\n d['DISE'] = [0, 1, 0, 0, 0, 0, 0]\n d['TRMT'] = [0, 0, 1, 0, 0, 0, 0]\n d['GOAL'] = [0, 0, 0, 1, 0, 0, 0]\n d['PREG'] = [0, 0, 0, 0, 1, 0, 0]\n d['FAML'] = [0, 0, 0, 0, 0, 1, 0]\n d['SOCL'] = [0, 0, 0, 0, 0, 0, 1]\n\n max_len = -1\n\n #Load data from files\n samples = []\n with open(data_file, 'rb') as csvfile:\n spamreader = csv.reader(csvfile, delimiter='\\t', quotechar='|')\n for i, row in enumerate(spamreader):\n if (row[0] == \"Category\"):\n continue\n print (i, row[1])\n #samples.append([row[0], row[2]])\n #getting class and title = row[0] and row[1] respectively\n samples.append([row[1], row[2], row[0]])\n #split by words\n\n return samples",
"def train_with_validation_provided(self, features, labels, validation_features, validation_labels):\n pass",
"def _loadTest(self, features, labels):\n\t\tself.testX_, self.testY_, self.testLabel_ = self.__load(features, labels)",
"def load_data(trainfile, testfile):\n raw_train = pd.read_csv(trainfile, header=None)\n raw_test = pd.read_csv(testfile, header=None)\n train = raw_train.values\n test = raw_test.values\n train_features = train[0::, 1::]\n train_label = train[::, 0]\n test_features = test[0::, 1::]\n test_label = test[::, 0]\n train, cv , train_label, cv_label = train_test_split(train_features,train_label, test_size=0.33, random_state=42)\n return train, train_label, \\\n cv, cv_label, \\\n test_features, test_label",
"def load_labels():\n filename = os.path.join(config['inference']['model_dir'], 'output_labels.txt')\n global labels\n labels = [line.rstrip() for line in tf.gfile.FastGFile(filename)]",
"def load_for_sklearn(self):\n\n labels = [] # string labels\n examples = [] # examples as strings\n\n # document number -> label mapping\n doc2label = n2b2.map_patients_to_labels(\n self.xml_dir,\n self.category)\n\n for f in os.listdir(self.cui_dir):\n doc_id = f.split('.')[0]\n file_path = os.path.join(self.cui_dir, f)\n file_as_string = open(file_path).read()\n\n string_label = doc2label[doc_id]\n int_label = LABEL2INT[string_label]\n labels.append(int_label)\n examples.append(file_as_string)\n\n return examples, labels",
"def load_test_data(label_fname, data_fname):\n labels = load_csv(label_fname)\n data = load_csv(data_fname, 'excel-tab')\n\n # Join all data together on the ids given in the files\n joined_data = {}\n for label in labels:\n id = label[0]\n joined_data[id] = {'class': label[1]}\n for rec in data:\n id = rec[0]\n if id in joined_data:\n joined_data[id]['data'] = rec[1]\n\n # Clean and convert the data to reals\n max_features = 0\n for id in joined_data:\n words = clean_text(joined_data[id]['data'])\n reals = convert_to_reals(words)\n joined_data[id]['data'] = reals\n if len(reals) > max_features:\n max_features = len(reals)\n\n # Pad the data\n for id in joined_data:\n reals = joined_data[id]['data']\n joined_data[id]['data'] = reals + (max_features - len(reals)) * [0.0]\n\n # Prepare the data for training\n training_data = np.array([joined_data[id]['data'] for id in joined_data])\n training_labels = [joined_data[id]['class'] == 'OFF' for id in joined_data]\n return training_labels, training_data, max_features",
"def load_data_and_labels_another():\n\n y_train = []\n y_valid =[]\n y_test = []\n x_train =[]\n x_valid = []\n x_test = []\n index = []\n labels = {}\n topics = ['finance' , 'international', 'legal', 'social','tech','hemp']\n for idx, topic in enumerate(topics):\n folder_name = \"data/\" + topic\n all_files = os.listdir(folder_name)\n clean_news = []\n # read in files in each topic's folder\n for single_file in all_files:\n raw_data_file_name = os.path.join(folder_name, single_file)\n news = list(open(raw_data_file_name, mode = 'rb').readlines())\n clean_news = clean_news + news\n clean_news = [s.strip() for s in clean_news]\n clean_news = [clean_str(s) for s in clean_news]\n clean_news = [normalizing_str(s) for s in clean_news]\n clean_news = [extract_clean_words(s) for s in clean_news]\n clean_news = [s for s in clean_news if s is not None]\n length_of_news = len(clean_news)\n test_num = int(length_of_news * 0.3)\n valid_num = int(length_of_news * 0.3)\n train_num = length_of_news - test_num - valid_num\n clean_news = np.array(clean_news)\n\n \n \n #x_text = x_text + clean_news\n if topic == 'finance':\n y_topic = [[1,0,0,0,0,0] for _ in clean_news]\n elif topic == 'international':\n y_topic = [[0,1,0,0,0,0] for _ in clean_news]\n elif topic == 'legal':\n y_topic = [[0,0,1,0,0,0] for _ in clean_news]\n elif topic == 'social':\n y_topic = [[0,0,0,1,0,0] for _ in clean_news]\n elif topic == 'tech':\n y_topic = [[0,0,0,0,1,0] for _ in clean_news]\n elif topic == 'hemp':\n y_topic = [[0,0,0,0,0,1] for _ in clean_news]\n y_topic = np.array(y_topic)\n\n #randomly shuffle the data and divide them into train, valid and test\n np.random.seed(9)\n indices = np.random.permutation(clean_news.shape[0])\n training_idx = indices[:train_num]\n valid_idx = indices[train_num:train_num+valid_num]\n test_idx = indices[train_num+valid_num: ]\n # Testing: no record is missed.\n #tem = np.concatenate((training_idx, valid_idx, test_idx),axis = 0)\n #print(tem.sort()) \n train_piece_x = list(clean_news[training_idx])\n valid_piece_x = list(clean_news[valid_idx])\n test_piece_x = list(clean_news[test_idx])\n train_piece_y = list(y_topic[training_idx])\n valid_piece_y = list(y_topic[valid_idx])\n test_piece_y = list(y_topic[test_idx])\n y_train = y_train + train_piece_y\n y_valid = y_valid + valid_piece_y\n y_test = y_test + test_piece_y\n x_train = x_train + train_piece_x\n x_valid = x_valid + valid_piece_x\n x_test = x_test + test_piece_x\n\n # Store the data in data_pickle.\n y_train = np.array(y_train)\n y_valid = np.array(y_valid)\n y_test = np.array(y_test)\n file = open('data_pickle', 'wb')\n pickle.dump([x_train,x_valid,x_test,y_train,y_valid,y_test], file)\n file.close()\n print(\"-------------------------------------------------------\")\n print(\"*****Dumped Data_pickle*****\")",
"def load_data_and_labels(self):\n gen = image.ImageDataGenerator()\n target_size = (224,224)\n if self.preprocess:\n print('Preprocessing data...')\n if not os.path.isdir(self.pproc_dir()):\n os.mkdir(self.pproc_dir())\n \n batch_arr = []\n for ld,segment in [(self.train_dir(), 'train'),\n (self.valid_dir(), 'valid')]:\n # TODO(ness): segment = os.basename(ld)\n flowgen = gen.flow_from_directory(\n ld,\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1)\n # Save the batches using method defined in utils.py\n data = np.concatenate([flowgen.next() for i in range(flowgen.n)])\n batches_dir = self.pproc_dir() + segment + '-bc'\n save_array(batches_dir, data)\n \n # Save the classes.\n cls_dir = self.pproc_dir() + segment + '-cl'\n save_array(cls_dir, flowgen.classes)\n \n batch_arr.append((data, flowgen.classes, flowgen.class_indices))\n \n # Set the data.\n self.training_data = batch_arr[0][0]\n self.validation_data = batch_arr[1][0]\n \n # Classes are zero-indexed and represent a category in\n # numerical form. So if the classes are 'dog' and 'cat',\n # the possible class values will be 0 and 1.\n self.trn_classes = batch_arr[0][1]\n self.val_classes = batch_arr[1][1]\n \n # Labels are the one-hot encoded (i.e. categorical)\n # version of the classes. In other words, if there are\n # 5 classes and an element belongs to class 2,\n # its label will be [0,0,1,0,0] (index 1).\n self.training_labels = to_categorical(batch_arr[0][1])\n self.validation_labels = to_categorical(batch_arr[1][1])\n \n # Class indices are dictionaries of the form\n # {'category_name': 0, 'category_name_2: 1}. They\n # make the mapping between numerical class indices and\n # a human-readable category name. They are (should be...)\n # the same for validation and training, so only load them\n # once, after sanity checking.\n self.cindices = batch_arr[0][2]\n print('Done preprocessing.')\n else:\n print('Loading data...')\n # Load the pre-saved data using methods defined in utils.py. See\n # preprocessing branch for the meaning of the data.\n self.training_data = load_array(self.pproc_dir() + 'train-bc')\n self.validation_data = load_array(self.pproc_dir() + 'valid-bc')\n self.trn_classes = load_array(self.pproc_dir() + 'train-cl')\n self.val_classes = load_array(self.pproc_dir() + 'valid-cl')\n self.training_labels = to_categorical(self.trn_classes)\n self.validation_labels = to_categorical(self.val_classes)\n \n # To get the class indices, we create the generator. It's cheap to\n # run since it doesn't actually load all the data.\n flowgen = gen.flow_from_directory(\n self.train_dir(),\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1) \n self.cindices = flowgen.class_indices\n print('Done loading.')",
"def read_data(feature_file, label_file):",
"def load_data_and_labels():\n # Load data from files\n positive_examples = []\n for file in os.listdir('with_datarace'):\n filename = os.fsdecode(file)\n ast_file = open('with_datarace\\\\' + filename, 'r')\n token_vector = ast_file.read()\n positive_examples.append(token_vector)\n file_names.append(filename)\n\n negative_examples = []\n for file in os.listdir('without_datarace\\\\'):\n filename = os.fsdecode(file)\n ast_file = open('without_datarace\\\\' + filename, 'r')\n token_vector = ast_file.read()\n negative_examples.append(token_vector) # List of lists\n file_names.append(filename)\n\n positive_examples = [s.strip() for s in positive_examples]\n negative_examples = [s.strip() for s in negative_examples]\n\n # Split by words\n x_text = positive_examples + negative_examples # why we didn't cobine it from the beginning?\n x_text = [clean_str(sent) for sent in x_text]\n x_text = [s.split(\" \") for s in x_text]\n\n # Generate labels\n positive_labels = [[0, 1] for _ in positive_examples]\n negative_labels = [[1, 0] for _ in negative_examples]\n y = np.concatenate([positive_labels, negative_labels], 0)\n\n return [x_text, y]",
"def load_features_labels(self):\n MFCCs = torch.from_numpy(np.load(self.feature_file))\n labels = torch.from_numpy(np.load(self.label_file))\n 'Loading from files finished!'\n return MFCCs.view(-1,1,128,128), labels.long()",
"def load_and_process(data_dir, train_node_num, eval_node_num, test_node_num):\n biases, feature, label = get_biases_features_labels(data_dir)\n # split training, validation and testing set\n nodes_num = label.shape[0]\n train_mask = get_mask(nodes_num, 0, train_node_num)\n eval_mask = get_mask(nodes_num, train_node_num, train_node_num + eval_node_num)\n test_mask = get_mask(nodes_num, nodes_num - test_node_num, nodes_num)\n\n y_train = np.zeros(label.shape)\n y_val = np.zeros(label.shape)\n y_test = np.zeros(label.shape)\n\n y_train[train_mask, :] = label[train_mask, :]\n y_val[eval_mask, :] = label[eval_mask, :]\n y_test[test_mask, :] = label[test_mask, :]\n\n y_train = y_train[np.newaxis]\n y_val = y_val[np.newaxis]\n y_test = y_test[np.newaxis]\n train_mask = train_mask[np.newaxis]\n eval_mask = eval_mask[np.newaxis]\n test_mask = test_mask[np.newaxis]\n\n return feature, biases, y_train, train_mask, y_val, eval_mask, y_test, test_mask",
"def load_bottleneck_data(training_file, validation_file):\n print(\"Training file\", training_file)\n print(\"Validation file\", validation_file)\n\n with open(training_file, 'rb') as f:\n train_data = pickle.load(f)\n with open(validation_file, 'rb') as f:\n validation_data = pickle.load(f)\n\n X_train = train_data['features']\n y_train = train_data['labels']\n X_val = validation_data['features']\n y_val = validation_data['labels']\n\n return X_train, y_train, X_val, y_val",
"def _read_train_datas(self):\r\n with open(self.train_label_path, 'r') as fb:\r\n lines = fb.readlines()\r\n return self._parse_raw_labels(lines)",
"def load_data():\r\n train_dataset = h5py.File('train_catvnoncat.h5', \"r\") # Change the directory as per your system\r\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\r\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\r\n\r\n test_dataset = h5py.File('test_catvnoncat.h5', \"r\") # Change the directory as per your system\r\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\r\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\r\n\r\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\r\n \r\n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\r\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\r\n \r\n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes",
"def load_data_multilabel(traning_data_path,vocab_word2index, vocab_label2index,sentence_len,training_portion=0.95):\n file_object = codecs.open(traning_data_path, mode='r', encoding='utf-8')\n lines = file_object.readlines()\n random.shuffle(lines)\n label_size=len(vocab_label2index)\n X = []\n Y = []\n for i,line in enumerate(lines):\n raw_list = line.strip().split(\"__label__\")\n input_list = raw_list[0].strip().split(\" \")\n input_list = [x.strip().replace(\" \", \"\") for x in input_list if x != '']\n x=[vocab_word2index.get(x,UNK_ID) for x in input_list]\n label_list = raw_list[1:]\n label_list=[l.strip().replace(\" \", \"\") for l in label_list if l != '']\n label_list=[vocab_label2index[label] for label in label_list]\n y=transform_multilabel_as_multihot(label_list,label_size)\n X.append(x)\n Y.append(y)\n if i<10:print(i,\"line:\",line)\n\n X = pad_sequences(X, maxlen=sentence_len, value=0.) # padding to max length\n number_examples = len(lines)\n training_number=int(training_portion* number_examples)\n train = (X[0:training_number], Y[0:training_number])\n\n test_number=int((number_examples-training_number)/2)\n\n\n test = (X[training_number+ 1:training_number+test_number], Y[training_number + 1:training_number+test_number])\n valid = (X[training_number + test_number + 1:],\n Y[training_number + test_number + 1:])\n\n return train,test,valid",
"def _load_training_and_test_sets(normalize):\n class_labels = []\n test_labels = []\n norm = None\n if normalize == True:\n norm = loading.get_normalize_vector()\n\n for i in range(0, 10):\n [training, test] = loading.load_number_set(i, 0.7, norm_vector=norm)\n labels = [str(i)] * training.shape[0]\n tlabels = [str(i)] * test.shape[0]\n if i == 0:\n train_points = training\n test_points = test\n else:\n train_points = np.concatenate((train_points, training), axis = 0)\n test_points = np.concatenate((test_points, test), axis = 0)\n class_labels.extend(labels)\n test_labels.extend(tlabels)\n\n return train_points, test_points, class_labels, test_labels",
"def loadData(cv_train_path,cv_test_path,experiment_path,class_label,instance_label):\n #Grab path name components\n dataset_name = cv_train_path.split('/')[-3]\n cvCount = cv_train_path.split('/')[-1].split(\"_\")[-2]\n #Create folder to store scaling and imputing files\n if not os.path.exists(experiment_path + '/' + dataset_name + '/exploratory/scale_impute'):\n os.mkdir(experiment_path + '/' + dataset_name + '/exploratory/scale_impute')\n #Load training and testing datasets\n data_train = pd.read_csv(cv_train_path,na_values='NA',sep=',')\n data_test = pd.read_csv(cv_test_path,na_values='NA',sep=',')\n #Grab header labels for features only\n header = data_train.columns.values.tolist()\n header.remove(class_label)\n if instance_label != 'None':\n header.remove(instance_label)\n return data_train,data_test,header,dataset_name,cvCount",
"def load_training_data(fname):\n all_data = load_csv(fname, 'excel-tab')\n\n labels = [rec[2] == 'OFF' for rec in all_data]\n data = [convert_to_reals(clean_text(rec[1])) for rec in all_data]\n max_features = max([len(rec) for rec in data])\n\n # Pad the data\n for rec in data:\n rec.extend([0.0] * (max_features - len(rec)))\n\n return labels, data, max_features",
"def load_dataset():\n\n train_dataset = h5py.File('datasets/train_catvnoncat.h5', \"r\")\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\n\n test_dataset = h5py.File('datasets/test_catvnoncat.h5', \"r\")\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\n\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\n \n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\n \n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes",
"def load_data(label_name='Species'):\n\n # Create a local copy of the training set.\n train_path = tf.keras.utils.get_file(fname=TRAIN_URL.split('/')[-1],\n origin=TRAIN_URL)\n # train_path now holds the pathname: (训练集和测试集路径) ~/.keras/datasets/iris_training.csv\n\n # Parse the local CSV file.(解析)\n train = pd.read_csv(filepath_or_buffer=train_path,\n names=CSV_COLUMN_NAMES, # list of column names\n header=0 # ignore the first row of the CSV file.\n )\n # train now holds a pandas DataFrame, which is data structure\n # analogous to a table.\n\n # 1. Assign the DataFrame's labels (the right-most column) to train_label.\n # 2. Delete (pop) the labels from the DataFrame.\n # 3. Assign the remainder of the DataFrame to train_features\n print(\"-\")\n train_features, train_label = train, train.pop(label_name)\n\n # Apply the preceding logic to the test set.\n test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)\n test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)\n test_features, test_label = test, test.pop(label_name)\n\n # Return four DataFrames.\n return (train_features, train_label), (test_features, test_label)",
"def load_data(self) -> tuple:\n label_num = {}\n data_set = pathlib.Path(self.path)\n data = []\n\n # create the label lookup dict for verifcation later\n for i, v in enumerate(data_set.iterdir()):\n label_num[v.name] = i\n self.labels[i] = v.name\n # end\n\n # read images\n for img_path in data_set.rglob(\"*.jpg\"):\n lbl = label_num[str(img_path.parent.stem)]\n img = cv2.imread(str(img_path))\n img = cv2.resize(img, self.dims, interpolation=cv2.INTER_AREA)\n\n # flatten RGB data into a vector\n # NOTE: NOT ACTUALLY NECESSARY! \n img.flatten()\n\n # label the sample and append to temp data list\n sample = np.append(lbl, img)\n data.append(sample)\n # end\n\n # partition and package the data (*_ ensures safe unpacking)\n train, test, validate, *_ = Data.partition(data, self.parts, 0.7, 0.2)\n self.train = Data(train)\n self.test = Data(test)\n self.validate = Data(validate)",
"def load(train_file, test_file):\n print('\\nLoad the raw training and test set data...')\n y_train, tx_train, ids_train = load_csv_data(train_file)\n y_test, tx_test, ids_test = load_csv_data(test_file)\n print('\\n... finished.')\n return y_train, tx_train, ids_train, y_test, tx_test, ids_test",
"def load_files(self) -> Tuple[List[str], List[str]]:\n filename, _, ext = self.file_path.rpartition(\".\")\n features_file = filename + \"_nospace.\" + ext\n labels_file = filename + \"_bies.\" + ext\n features = self.read_dataset(features_file)\n labels = self.read_dataset(labels_file)\n avg_len = sum(len(s) for s in features) // len(features)\n print(\"Dataset average length:\", avg_len)\n self.max_length = avg_len + (avg_len // 3)\n return features, labels",
"def load_dataset(dataset_path: str, mode: str, vectorizer_path: str):\n if mode == \"celeb\":\n x_path = dataset_path + \"/celebrity-feeds.ndjson\"\n else:\n x_path = dataset_path + \"/feeds.ndjson\"\n y_data = [json.loads(line) for line in open(\"./data/gt-labels.ndjson\", \"r\")]\n\n if not Path(vectorizer_path).exists():\n logging.info(\"no stored vectorizer found, creating ...\")\n vec = TfidfVectorizer(preprocessor=_preprocess_feed, ngram_range=N_GRAM_RANGE,\n max_features=MAX_WORD_FEATURES, analyzer='word', min_df=3,\n )# norm='l1')\n vec.fit(_read_text_linewise(x_path, mode))\n joblib.dump(vec, vectorizer_path)\n else:\n logging.info(\"loading stored vectorizer\")\n vec = joblib.load(vectorizer_path)\n\n # load x data\n logging.info(\"transforming data ...\")\n x = vec.transform(_read_text_linewise(x_path, mode))\n\n # load Y data\n # y_gender = [g_dict[l[\"gender\"]] for l in y_data]\n y_gender = []\n y_occ = []\n y_age = []\n ids = []\n\n for l in y_data:\n y_gender.append(g_dict[l[\"gender\"]])\n y_occ.append(o_dict[l[\"occupation\"]])\n y_age.append(_get_age_class(l[\"birthyear\"]))\n ids.append(l[\"id\"])\n\n # y_occ = [o_dict[l[\"occupation\"]] for l in y_data]\n # y_age = [_get_age_class(l[\"birthyear\"]) for l in y_data]\n # ids = [i[\"id\"] for i in y_data]\n return x, y_age, y_gender, y_occ, ids",
"def _load_validation_data(validation_leveldb, width, height):\n\n print \"\\tLoading validation data...\"\n input_vectors = []\n expected_targets = []\n\n db = plyvel.DB(validation_leveldb)\n for key, value in db:\n datum = Datum()\n datum.ParseFromString(value)\n\n data = np.fromstring(datum.data, dtype=np.uint8)\n data = np.reshape(data, (3, height, width))\n # Move the color channel to the end to match what Caffe wants.\n data = np.swapaxes(data, 0, 2) # Swap channel with width.\n data = np.swapaxes(data, 0, 1) # Swap width with height, to yield final h x w x channel.\n\n input_vectors.append(data)\n expected_targets.append(datum.label)\n\n db.close()\n\n print \"\\t\\tValidation data has %d images\" % len(input_vectors)\n\n return {\n \"input_vectors\": np.asarray(input_vectors),\n \"expected_targets\": np.asarray(expected_targets)\n }",
"def load_data_multilabel(traning_data_path,vocab_word2index, vocab_label2index,sentence_len,training_portion=0.95):\n file_object = codecs.open(traning_data_path, mode='r', encoding='utf-8')\n lines = file_object.readlines()\n random.shuffle(lines)\n label_size=len(vocab_label2index)\n X = []\n Y = []\n for i,line in enumerate(lines):\n raw_list = line.strip().split(\"__label__\")\n input_list = raw_list[0].strip().split(\" \")\n input_list = [x.strip().replace(\" \", \"\") for x in input_list if x != '']\n x=[vocab_word2index.get(x,UNK_ID) for x in input_list]\n label_list = raw_list[1:]\n label_list=[l.strip().replace(\" \", \"\") for l in label_list if l != '']\n label_list=[vocab_label2index[label] for label in label_list]\n y=transform_multilabel_as_multihot(label_list,label_size)\n X.append(x)\n Y.append(y)\n if i<10:print(i,\"line:\",line)\n\n X = pad_sequences(X, maxlen=sentence_len, value=0.) # padding to max length\n number_examples = len(lines)\n training_number=int(training_portion* number_examples)\n train = (X[0:training_number], Y[0:training_number])\n valid_number=min(1000,number_examples-training_number)\n test = (X[training_number+ 1:training_number+valid_number+1], Y[training_number + 1:training_number+valid_number+1])\n return train,test"
] | [
"0.7066322",
"0.6910587",
"0.68762726",
"0.68701327",
"0.6851733",
"0.6828537",
"0.6772606",
"0.6747656",
"0.671662",
"0.6708065",
"0.6663688",
"0.6652927",
"0.66424817",
"0.66018474",
"0.6580891",
"0.6563428",
"0.6560234",
"0.65583473",
"0.65253925",
"0.6494413",
"0.64848816",
"0.6482083",
"0.64558566",
"0.6447768",
"0.64109296",
"0.6402818",
"0.6400421",
"0.6389543",
"0.6388269",
"0.63738906"
] | 0.7184243 | 0 |
Handles displaying recipe categories | def categories():
return render_template('categories.html', recipe_categories=USERS[session['username']].recipe_categories) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_categories():\n if 'user' not in session:\n flash(\"You need to own this recipe to be able to delete it.\")\n return redirect(url_for(\"login\"))\n\n categories = list(mongo.db.categories.find().sort(\"category_name\", 1))\n return render_template(\"categories.html\", categories=categories)",
"def getCategory():",
"def category(request):\n\n return render(request, \"core/category_list.html\", {\n \"category_list\": Category.objects.all()\n })",
"def categories(self):\n pass",
"def all_categories(request, slug=None):\n c = {\"categories\": Node.objects.filter(kind=\"C\")}\n return render_to_response(\"categories.html\", c)",
"def categories(request):\n human_readable = []\n for obj in Listing.CHOICES:\n\n # For each listing choice, we add the human readable format, and the machine-readable as well.\n human_readable+= [[obj[0], obj[1]]]\n \n # Returns categories.html, giving it access to variables.\n return render(request, \"auctions/categories.html\", {\n 'li': human_readable,\n \"bo\": False\n })",
"def recipes(category):\n # if statements to display the recipes base on category name\n if category == \"Pre Workout Meal\":\n recipe = mongo.db.recipes.find({\"category_name\": \"Pre Workout Meal\"})\n elif category == \"Post Workout Meal\":\n recipe = mongo.db.recipes.find({\"category_name\": \"Post Workout Meal\"})\n else:\n recipe = mongo.db.recipes.find()\n\n return render_template('pages/allrecipe.html', recipe=recipe, category_title=category, recipes=mongo.db.recipes.find(), isFooter=True)",
"def show_categories(self):\n cat_model = TreeModel(('Categories', ))\n self.categoriesView.setModel(cat_model)\n\n categories = self.orm.fetch_parents()\n for category in categories:\n item = TreeItem(category, cat_model.rootItem)\n cat_model.rootItem.appendChild(item)\n\n subs = self.orm.fetch_subcategories_for_parent(category)\n\n for sub in subs:\n sub_item = TreeItem(sub, item)\n item.appendChild(sub_item)\n\n self.categoriesView.expandAll()",
"def CategoryView(request, cats):\r\n\r\n context = {\r\n 'posts': Post.objects.filter(category=cats.replace('-', ' ')).all(),\r\n 'cats': cats,\r\n 'cat_menu': Category.objects.all()\r\n }\r\n return render(request, 'blog/categories.html', context)",
"def category_choice(self):\n self.leave_category_choice = 1\n while self.leave_category_choice:\n print(fr.FR[15])\n for element in config.CATEGORIES:\n print(str(config.CATEGORIES.index(element)+1)\n + \" : \" + element)\n self.category_choice_input()",
"def category(request, slug):\n categry = get_object_or_404(Category,slug=slug)\n story_list = Story.objects.filter(category=category)\n heading = \"Category: %s\" % category.label\n return render_to_response('cms/story_list.html', locals())",
"def categories(content_id=None):\n try:\n data = {'categories': content_categories(request.content)}\n return render(data, template='categories.jinja2')\n except Exception, e:\n traceback.print_exc()\n return render({'url': request.url, 'error': str(e)},\n template='error.jinja2')",
"def show_categories():\n for category in NEWS_CATEGORIES:\n print(category)",
"def add_category():\n if request.method == 'POST':\n result = USERS[session['username']].add_recipe_category(\n request.form['title'])\n if result == 'recipe_category added':\n flash(result, 'info')\n else:\n flash(result, 'warning')\n return redirect(url_for('categories'))\n return render_template('add_category.html')",
"def category(request, category_id, template_name='doppler/shift/catalog/category.haml'):\n category = get_object_or_404(Category, pk=category_id, enabled=True)\n products = category.enabled_products\n subcategories = category.children.filter(enabled=True)\n return render_to_response(\n template_name,\n {\n 'category': category,\n 'products': products,\n 'subcategories': subcategories,\n },\n context_instance=RequestContext(request))",
"def list_categories(self):\n raise NotImplementedError()",
"def categories(request):\n \n # query for all active listings and initialize an empty list\n listing = Auction_listing.objects.filter(active=True)\n categories = []\n # loop over all listings, if category of current listing is not yet present in the categories list, add it there\n for lis in listing:\n if lis.category not in categories and lis.category != '':\n categories.append(lis.category)\n \n return render(request, 'auctions/categories.html', {\n 'categories': categories\n })",
"def getcategory(self):\n\n response = requests.get(\"https://fr.openfoodfacts.org/categories.json\")\n\n data = response.json()\n\n self.rawcategorydata = data",
"def menu_categories(self, app: object) -> None:\n while True:\n if self.back:\n break\n else:\n self.cmd_categories = app.view_cat()\n rand_cat = random.sample(list(self.cmd_categories), 10)\n print(\"-\" * 50)\n for x in rand_cat:\n print(f\"{x} : {self.cmd_categories[x]}\")\n entry = input(\n \"\\nEntrer un chiffre pour sélectionner la catégorie correspondante : \"\n )\n if entry in self.cmd_categories:\n if entry == \"0\":\n break\n else:\n self.menu_products(app, entry)\n else:\n print(\"\\nCommande incorrecte\")",
"async def category(self,ctx):\n await ctx.send(\"Yes this is a category.\")",
"def add_categories_handler():\n rq = request.get_json()\n name = rq['name']\n picture = rq['picture']\n description = rq['description']\n category = addCategory(name, picture, description, g.user.id)\n return jsonify(category=category.serialize)",
"def insert_recipe_category():\r\n\r\n # validates request form\r\n form = request.form\r\n error_list = validate_form(form, 'recipe_category')\r\n\r\n if error_list == []:\r\n # validates image URL\r\n image_URL = validate_image(form['img_link'])\r\n\r\n # inserts recipe category\r\n recipe_category = {\r\n 'name': request.form.get('name'),\r\n 'img_link': image_URL,\r\n 'number_of_recipes': 0\r\n }\r\n mongo.db.recipe_categories.insert_one(recipe_category)\r\n\r\n # redirects to the recipe category search\r\n return redirect(url_for(\r\n 'search',\r\n collection='recipe_categories')\r\n )\r\n\r\n else:\r\n # initializes page title and header\r\n page_title = 'Add recipe category'\r\n page_header = 'Add a new recipe category:'\r\n\r\n # sends error list back to the form to correct mistakes\r\n return render_template(\r\n 'add_form.html',\r\n errors=error_list,\r\n form=form,\r\n page_title=page_title,\r\n page_header=page_header\r\n )",
"def categories_menu():\n categories = ['EU-affairs', 'Economy', 'Security', 'Society', 'World']\n\n for category in categories:\n url = build_url({'mode': 'Topic', 'foldername': category, 'page': 1})\n li = xbmcgui.ListItem(category, iconImage='DefaultFolder.png')\n xbmcplugin.addDirectoryItem(handle=ADDON_HANDLE, url=url, listitem=li, isFolder=True)\n\n xbmcplugin.endOfDirectory(ADDON_HANDLE)",
"def showCategories():\n categories = session.query(Category).order_by(asc(Category.name))\n return render_template('categories.html', categories=categories)",
"def get_categories(request):\n try:\n categories = []\n for category in Category.objects.all():\n categories.append({\"title\": category.title, \"id\": category.pk})\n\n return format_ajax_response(True, \"Knowledgebase categories retrieved successfully.\", {\"categories\": categories})\n except Exception as ex:\n logger.error(\"Failed to get_categories: %s\" % ex)\n return format_ajax_response(False, \"There was an error retreiving the knowledgebase categories.\")",
"def show_categories():\n categories = session.query(Category).all()\n username = login_session.get('username')\n user_id = login_session.get('user_id')\n provider = login_session.get('provider')\n if username is not None:\n username = login_session.get('username')\n return render_template(\"categories.html\", categories=categories,\n username=username, user_id=user_id,\n provider=provider)",
"def Categories():\n cat = {\n \t \"Featured\": 0,\n \t \"All\": 1,\n \t \"Collectibles\": 2,\n \t \"Clothing\": 3,\n \t \"BodyParts\": 4,\n \t \"Gear\": 5,\n \t \"Models\": 6,\n \t \"Plugins\": 7,\n\t \"Decals\": 8,\n \t \"Audio\": 9,\n \t \"Meshes\": 10,\n\t \"Accessories\": 11,\n\t \"AvatarAnimations\": 12,\n\t \"CommunityCreations\": 13,\n\t \"Video\": 14,\n\t \"Recommended\": 15\n }\n return cat",
"def showCategories():\n\n categories = session.query(Category).order_by(asc(Category.name))\n if 'user_id' in login_session:\n return render_template('categories.html',\n categories=categories,\n user=getUserInfo(login_session['user_id']))\n else:\n return render_template('pubcategories.html', categories=categories)",
"def showCategories():\r\n categories = session.query(Category).all()\r\n if 'username' not in login_session:\r\n return render_template(\"publicCategories.html\", categories=categories)\r\n return render_template(\"categories.html\", categories=categories)",
"def cli(ctx, category_id):\n return ctx.ti.categories.show_category(category_id)"
] | [
"0.7086564",
"0.68516177",
"0.67932105",
"0.67012036",
"0.66799504",
"0.6656528",
"0.6643379",
"0.66044766",
"0.6603303",
"0.66017115",
"0.65058404",
"0.6419562",
"0.6403973",
"0.6363026",
"0.63584274",
"0.63432485",
"0.63299835",
"0.6321505",
"0.6302908",
"0.62928087",
"0.62873167",
"0.6243916",
"0.61774814",
"0.6173833",
"0.6147856",
"0.6141225",
"0.6100204",
"0.6052365",
"0.60501033",
"0.60482293"
] | 0.7799081 | 0 |
Inserts a book into the book table | def insert_book(self, title, author, year, isbn):
self.cursor.execute("INSERT INTO Book VALUES(NULL, ?, ?, ?, ?)",
(title, author, year, isbn))
self.connection.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def insert_book(title, author, year):\n try:\n cursor = conn.cursor()\n cursor.execute(\"\"\"\n INSERT INTO books(title, author, year)\n VALUES(?,?,?)\n \"\"\", (title, author, year))\n conn.commit()\n except Exception as e:\n logging.error(e)\n return False\n\n return True",
"def add_book(self, book):\n\n try:\n with self._db as db:\n cur = db.cursor()\n cur.execute('INSERT INTO books values (?, ?, ?)', (book.title, book.author, book.read))\n book.id = cur.lastrowid\n except sqlite3.IntegrityError:\n raise BookError('This book is already in the database')\n except sqlite3.Error as e:\n raise BookError(f'Error adding book {book}') from e",
"def insert(title, author, year, isbn,shelf,raw):\n\n conn_obj = mysql.connector.connect(host='localhost',database='mydb',user='root',password='kks')\n cur_obj = conn_obj.cursor()\n sql=\"INSERT INTO book (title, author, year, isbn,shelf,raw) VALUES(%s, %s, %s, %s, %s, %s)\"\n cur_obj.execute(sql,(title, author, year, isbn,shelf,raw))\n conn_obj.commit()\n conn_obj.close()",
"def insertData(self, table, title, rating, authorinfo, pubinfo):\n\n\t\tsql = \"insert into %s (bookname, authorinfo, pubinfo, rating) \\\n\t\t\tvalues('%s', '%s', '%s', '%s')\" %(table, title, authorinfo,\n\t\t\tpubinfo, rating)\n\t\ttry:\n\t\t\tself.cursor.execute(sql)\n\t\t\tself.conn.commit()\n\t\texcept Exception, e:\n\t\t\tsys.exit()",
"def create(self, book_info, destroy):\n self.connect()\n bid = book_info[0].get()\n title = book_info[1].get()\n author = book_info[2].get()\n status = book_info[3].get()\n status = status.lower()\n\n q = \"insert into {} values ('{}','{}','{}','{}')\"\n addbook_query = q.format(self.book_table, bid, title, author, status)\n try:\n self.cur.execute(addbook_query)\n self.con.commit()\n messagebox.showinfo('Success', \"Book added successfully\")\n except MySQLError as err:\n messagebox.showinfo(\"Error\", \"Can't add data into Database\")\n print(err)\n destroy()",
"def save_book(self):\n db.session.add(self)\n db.session.commit()",
"def add_book(self, data):\n exists = self.check_if_exists(data['isbn'])\n\n if exists:\n query = f\"\"\"UPDATE {TABLE} SET quantity = quantity + 10 WHERE bookID = '{data[\"isbn\"]}'\"\"\"\n else:\n query = f\"\"\"INSERT INTO {TABLE}(bookID, title, authors, avg_rating, ratings_count,\n lang_code, num_pages, text_reviews, pub_date, publisher) values(\n \"{data['isbn']}\",\n \"{data['title']}\",\n \"{data['authors']}\",\n {float(data['average_rating'])},\n {int(data['ratings_count'])},\n \"{data['language_code']}\",\n {int(data[' num_pages'])},\n {int(data['text_reviews_count'])},\n \"{data['publication_date']}\",\n \"{data['publisher']}\"\n );\"\"\"\n\n try:\n self.cursor.execute(query)\n self.conn.commit()\n except Error as e:\n print(e)",
"def insert_book():\n \n\n if request.method == 'POST':\n new_author = mongo.db.authors.insert_one({\n 'author_name': request.form.to_dict()['author_name']\n })\n \n author_id = new_author.inserted_id\n \n # Create new book in mongo.db.books\n new_book = mongo.db.books.insert_one({\n 'title': request.form.to_dict()['title'],\n 'genre': request.form.to_dict()['genre'],\n 'pages': request.form.to_dict()['pages'],\n 'reviews': [],\n 'likes': [],\n 'dislikes': [],\n 'author_id': str(ObjectId(author_id)),\n 'isbn_num': request.form.to_dict()['isbn_num']\n })\n \n return redirect(url_for('library'))\n \n return render_template('insert_book.html', \n genres=[genre for genre in mongo.db.genres.find()])",
"def insert_books_data():\n # Get data from csv file\n print(\"Getting data from csv..\")\n file = open(\"books.csv\")\n reader = csv.reader(file)\n\n # Insert csv data into table\n print(\"Inserting data into 'books' table..\")\n for isbn, title, author, year in reader:\n try:\n db.execute(\"INSERT INTO books (isbn, title, author, year)\\\n VALUES (:isbn, :title, :author, :year)\", {\n \"isbn\": isbn, \"title\": title, \"author\": author, \"year\": year })\n except exc.DataError as err:\n print(\"Invalid entry in csv file\")\n db.commit()\n print(\"Data inserted\")",
"def create_book():\n Book.objects.create(book_id=\"test_id\",\n title=\"test_title\",\n authors=\"test_author\",\n published_date=\"2021\",\n categories=[\"test_category\"],\n average_rating=5,\n ratings_count=5,\n thumbnail=\"http://books.google.com/books/test\"\n )",
"def register_book(self, title: str, author: str, price: float, barcode: str, stock=0):\n try:\n if not self.verify_register(barcode):\n self.db.cursor.execute('INSERT INTO books (title, author, price, bar_code, stock) VALUES (%s, %s, %s, '\n '%s, %s)', (title, author, round(price, 2), barcode, stock))\n self.db.con.commit()\n self.db.con.close()\n print('Registered Successfully!')\n else:\n print('Book already registered!')\n except Exception as error:\n print(error)",
"def create_book(title, author, completion):\n return Book.objects.create(title=title, author=author, completion=completion)",
"def add_book(book_name: str, book_price: int, user_id: int):\n book = Book(num=0, id=0, book_name=book_name, book_price=book_price, user_id=user_id)\n session.add(book)\n # auto increment id from 1\n books = get_user_books(user_id)\n\n auto_increment(books)\n print(books)\n session.commit()",
"def add(self, path, title, author):\n path = path.decode('utf8')\n title = title.decode('utf8')\n author = author.decode('utf8')\n cursor = self._dbcon.cursor()\n filename = os.path.basename(path)\n dirname = os.path.dirname(path)\n t = (title, author, filename, dirname)\n sql = u\"insert into books values (?, ?, ?, ?)\"\n cursor.execute(sql, t)\n self._dbcon.commit()\n cursor.close()",
"def create_books_table():\n try:\n print(\"Creating 'books' table..\")\n db.execute(\"CREATE TABLE books (\\\n id SERIAL PRIMARY KEY, \\\n isbn VARCHAR NOT NULL, \\\n title VARCHAR NOT NULL, \\\n author VARCHAR NOT NULL, \\\n year INTEGER NOT NULL\\\n )\")\n print(\"Table 'books' created\")\n except exc.ProgrammingError as err:\n print(\"Table 'books' already exists\")\n db.commit()",
"def insert(self):\n self.getDbRecord().insert()\n\n return",
"def add_new(cls, name, author_id):\n\t\tbook = Book(name, author_id)\n\t\tdb.session.add(book)\n\t\tdb.session.commit()\n\t\treturn book.id",
"def insert(self, sql):\n try:\n # Execute the SQL command\n self.cursor.execute(sql)\n # Commit your changes in the database\n self.db.commit()\n except:\n # Rollback in case there is any error\n self.db.rollback()",
"def insert(self):\n db.session.add(self)\n db.session.commit()",
"def insert(self):\n db.session.add(self)\n db.session.commit()",
"def insert(self):\n db.session.add(self)\n db.session.commit()",
"def newClip(self, book, content, typ, date):\n sql = ''' insert into clippings values (NULL, '%s', '%s', '%s', '%s', '%s')\n''' % (book, '0', typ, date, content)\n\n self.__execute__(sql)\n pass",
"def insert_to_db(self) -> None:\n query = '''INSERT INTO ESLReceipts(Transaction_Number, Date, Description, Memo,\n Amount_Debit, Amount_Credit, Balance, Check_Number, \n Fees, Card_Type, Is_Payment, Is_Transaction, User_id)\n VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?);'''\n self.db.commit(query, values=self.to_tuple())\n\n if self.is_transaction \\\n and self.transaction is not None \\\n and not self.transaction.exists_in_db():\n self.transaction.insert_to_db()",
"def insert_db():\n populate_tables()",
"def insert(self, bus_tup):\n self.conn = psycopg2.connect(self.conn_string)\n self.cur = self.conn.cursor(\"rifflecursor\")\n self.query = \"\"\"\n INSERT INTO yelp_stored (business_id, name, city, country,\n old_rating, new_rating, rev_count, count_5, count_4, count_3,\n count_2, count_1, fav_count, unfav_count, avg_wts)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,\n %s, %s, %s)\n \"\"\"\n self.cur.execute(self.query, bus_tup)\n self.conn.commit()\n self.conn.close()",
"def run(self):\n self.db.table('points').insert({\n 'name': 'biblioteca',\n 'rfid': '123456'\n })",
"def save_book(request,book_id):\n \n if request.user.is_authenticated:\n user = get_object_or_404(\n UserModel,\n id=request.user.id\n )\n book = Response.response_front(book_id)\n\n #save book with all informations on table Book\n \n Book.objects.add_book(\n book_id,\n user,\n title=(book['title'][0]),\n book_cat=Response.build(book['categorie'][0]),\n picture=book['picture'][0],\n picture_detail=book['picture_detail'][0],\n description=book['description'][0],\n author=book['author'][0]\n )\n\n return redirect(\"home\")",
"def addBook(self, book):\n self._books[book.getTitle()] = book",
"def add_book(name, author):\n BOOKS.append({'name': name, 'author': author, 'read': False})",
"def populate(library):\n # Clears table\n table.delete(*table.get_children())\n\n # Inserts each book into the table\n # where text is the key field\n for book in library:\n table.insert(\"\", int(book[0]), text=book[0], values=(book[1], book[2], book[3], book[4]))"
] | [
"0.8030016",
"0.75679004",
"0.74723715",
"0.74601537",
"0.7431787",
"0.70822483",
"0.6939844",
"0.688597",
"0.66291636",
"0.6627789",
"0.6603897",
"0.65962255",
"0.6493063",
"0.64219564",
"0.6401724",
"0.625291",
"0.62327266",
"0.62045246",
"0.618287",
"0.618287",
"0.618287",
"0.61036724",
"0.60541046",
"0.6052733",
"0.6046094",
"0.60393214",
"0.6005825",
"0.5996558",
"0.59912",
"0.59850335"
] | 0.80825835 | 0 |
Updates a book from the book database | def update(self, id, title, author, year, isbn):
self.cursor.execute("UPDATE Book SET Title = ?, Author = ?, Year = ?, \
ISBN = ? WHERE Id = ?",
(title, author, year, isbn, id))
self.connection.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def put(self, book_id):\n a_book = query_book_by_id(book_id)\n if a_book is None:\n return 'Book does not exit', 404\n body = request.get_json()\n a_book.parse_body(body)\n db.session.add(a_book)\n db.session.commit()\n return a_book.serialize(), 200",
"def update_audiobook(_id, _title_of_the_audiobook, _author_of_the_title, _narrator,\r\n _duration_in_number_of_seconds):\r\n audiobook_to_update = Audiobook.query.filter_by(id=_id).first()\r\n audiobook_to_update.title_of_the_audiobook = _title_of_the_audiobook\r\n audiobook_to_update.author_of_the_title = _author_of_the_title\r\n audiobook_to_update.narrator = _narrator\r\n audiobook_to_update.duration_in_number_of_seconds = _duration_in_number_of_seconds\r\n db.session.commit()",
"def update(d,title, author, year, isbn,shelf,raw):\n conn_obj = mysql.connector.connect(host='localhost',database='mydb',user='root',password='kks')\n cur_obj = conn_obj.cursor()\n cur_obj.execute(\"UPDATE book where isbn=%s\"\n \"SET title = %s, \"\n \"author = %s, \"\n \"year = %s, \"\n \"shelf=%s,\"\n \"raw=%s\", \n (isbn,title, author, year,shelf,raw))\n conn_obj.commit()\n conn_obj.close()",
"def update_book(isbn):\n put_req = request.get_json()\n if not (Book.replace_book(isbn, put_req['name'], put_req['price'])):\n invalid_book_object_error_msg = {\n \"error\": \"Invalid book object update passed in PUT request\",\n \"helpString\": \"Valid data format is {'name': 'bookname', 'price': 7.9, 'isbn': 12345678}\"\n }\n # Because invalidBookObjectErrorMsg is a dictionary, need to convert it into a json object.\n # Set Header info for location (location of endpoint in request)\n return Response(json.dumps(invalid_book_object_error_msg), status=406, mimetype='application/json')\n # See https://www.flaskapi.org/api-guide/status-codes/ for flask API\n # response codes\n response = Response(\"\", 204, mimetype='application/json')\n response.headers['Location'] = \"/books/\" + str(isbn)\n return response",
"def update(self, book_info, destroy):\n self.connect()\n is_issue = len(book_info) == 2\n\n bid = book_info[0].get()\n if is_issue:\n issue_to = book_info[1].get()\n\n if is_issue:\n extract_bid = f\"select bid from {self.book_table}\"\n else:\n extract_bid = f\"select bid from {self.issued_table}\"\n\n status = False\n try:\n self.cur.execute(extract_bid)\n self.con.commit()\n for i in self.cur:\n self.all_bid.append(i[0])\n\n if bid in self.all_bid:\n check_avail = f\"select status from {self.book_table} where \" \\\n f\"bid = '{bid}'\"\n self.cur.execute(check_avail)\n self.con.commit()\n check = None\n for i in self.cur:\n check = i[0]\n\n if (is_issue and check == 'avail'\n or not is_issue and check == 'issued'):\n status = True\n else:\n status = False\n else:\n messagebox.showinfo(\"Error\", \"Book ID not present\")\n except MySQLError as err:\n messagebox.showinfo(\"Error\", \"Can't fetch Book IDs\")\n print(err)\n\n if is_issue:\n issue_sql = f\"insert into {self.issued_table} values ('{bid}',\" \\\n f\"'{issue_to}')\"\n up_status = f\"update {self.book_table} set status = 'issued' \" \\\n f\"where bid = '{bid}'\"\n else:\n issue_sql = f\"delete from {self.issued_table} where bid = '{bid}'\"\n up_status = f\"update {self.book_table} set status = 'avail' \" \\\n f\"where bid = '{bid}'\"\n\n try:\n if bid in self.all_bid and status:\n self.cur.execute(issue_sql)\n self.con.commit()\n self.cur.execute(up_status)\n self.con.commit()\n if is_issue:\n msg = \"Book Issued Successfully\"\n else:\n msg = \"Book Returned Successfully\"\n state = 'Success'\n else:\n if is_issue:\n msg = \"Book Already Issued\"\n else:\n msg = \"Please check the book ID\"\n state = \"Message\"\n messagebox.showinfo(state, msg)\n except MySQLError as err:\n messagebox.showinfo(\n \"Search Error\", \"The value entered is wrong, Try again\"\n )\n print(err)\n self.all_bid.clear()\n destroy()",
"def update_by_id(cls, id, name, author_id):\n\t\tbook = Book.query.get(id)\n\t\tbook.name = name\n\t\tbook.authors_id = author_id\n\t\tdb.session.commit()",
"def test_api_can_update_book(self):\n\n\t\t# create book\n\t\tadd_book = {\n\t\t\t'title': 'Hello Books',\n\t\t\t'isbn': '5698745124'\n\t\t}\n\t\tlogin_data = self.login_test_user()\n\t\ttoken = login_data['auth_token']\n\t\tres = self.client.post(\n\t\t\tf'{URL_BOOKS}',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\tcontent_type='application/json',\n\t\t\tdata=json.dumps(add_book)\n\t\t)\n\n\t\t# update book\n\t\tbook = self.client.put(\n\t\t\tf'{URL_BOOKS}/1',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\tcontent_type='application/json',\n\t\t\tdata=json.dumps(\n\t\t\t\tdict(\n\t\t\t\t\ttitle='updated book'\n\t\t\t\t)\n\t\t\t)\n\t\t)\n\n\t\tbook_res = json.loads(book.data.decode())\n\t\tself.assertTrue(book_res['title'] == 'updated book')",
"def update_book(self, book_form):\n # Implemented from template for\n # osid.resource.BinAdminSession.update_bin_template\n if self._catalog_session is not None:\n return self._catalog_session.update_catalog(catalog_form=book_form)\n collection = JSONClientValidated('commenting',\n collection='Book',\n runtime=self._runtime)\n if not isinstance(book_form, ABCBookForm):\n raise errors.InvalidArgument('argument type is not an BookForm')\n if not book_form.is_for_update():\n raise errors.InvalidArgument('the BookForm is for update only, not create')\n try:\n if self._forms[book_form.get_id().get_identifier()] == UPDATED:\n raise errors.IllegalState('book_form already used in an update transaction')\n except KeyError:\n raise errors.Unsupported('book_form did not originate from this session')\n if not book_form.is_valid():\n raise errors.InvalidArgument('one or more of the form elements is invalid')\n collection.save(book_form._my_map) # save is deprecated - change to replace_one\n\n self._forms[book_form.get_id().get_identifier()] = UPDATED\n\n # Note: this is out of spec. The OSIDs don't require an object to be returned\n return objects.Book(osid_object_map=book_form._my_map, runtime=self._runtime, proxy=self._proxy)",
"def test_update_book_details(self):\n\n first_book_list = BookList()\n first_book = Book()\n\n first_book.create_book({\n \"title\": \"First Man\",\n \"author\": \"James R. Hansen\",\n \"year\": 2005,\n \"publisher_name\": \"Simon & Schuster\",\n \"publication_date\": \"01/01/2018\",\n \"num_copies\": 1\n })\n\n first_book_list.add_book(first_book)\n\n new_book_details = {\n \"title\": \"First Man\",\n \"author\": \"James Hansen\",\n \"year\": 2018,\n \"publisher_name\": \"Simon & Schuster\",\n \"publication_date\": \"01/01/2018\",\n \"num_copies\": 5\n }\n\n assert first_book_list.update_book_details(new_book_details) == True\n assert first_book_list.find_book(\"First Man\") == True\n\n for book in first_book_list.show_all():\n assert book.get(\"title\") == \"First Man\"\n assert book.set(\"title\", \"First Man: The Life of Neil A. Armstrong\") == True\n\n assert first_book_list.find_book(\"First Man: The Life of Neil A. Armstrong\") == True",
"def update_price_books(self, barcode, new_price):\n try:\n self.db.cursor.execute('UPDATE books SET price = %s where id_books = %s', (round(new_price, 2), barcode))\n except Exception as error:\n print(error)\n else:\n self.db.con.commit()\n self.db.con.close()\n print('Updated Successfully!')",
"def update_book():\n try:\n key = list(request.args.keys())[0]\n val = request.args[key].strip('\"')\n data = request.get_json()\n filter = {key: val}\n except IndexError:\n queryVal = request.form.to_dict()\n filter_val, change_to_val = parse_filter_newValue(queryVal)\n filter = {filter_val[0]: filter_val[1]}\n data = {change_to_val[0]: change_to_val[1]}\n if all(value == '' for value in data.values()) or all(value == '' for value in filter.values()):\n print('here tho')\n return render_template('error.html', message=\"Please enter both fields\"), 400\n new_values = {\"$set\": data}\n mongo.db.Books.update_one(filter, new_values, upsert=False)\n\n return render_template(\"updated_book.html\", message=\"Book Has been updated\"), 200\n # return jsonify({'result': \"Successfully Updated\"}), 200",
"def add_book(self, book):\n\n try:\n with self._db as db:\n cur = db.cursor()\n cur.execute('INSERT INTO books values (?, ?, ?)', (book.title, book.author, book.read))\n book.id = cur.lastrowid\n except sqlite3.IntegrityError:\n raise BookError('This book is already in the database')\n except sqlite3.Error as e:\n raise BookError(f'Error adding book {book}') from e",
"def test_partly_update_book(self):\n data = {'isbn':'96712116-2'}\n response = self.client.patch(self.book.get_absolute_url(), data, format='json', content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n response = self.client.get(self.book.get_absolute_url())\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertContains(response, '96712116-2')",
"def edit_book(request, pk):\n\tbook = get_object_or_404(Book, pk=pk)\n\tif request.method == \"POST\":\n\t\tif book.author == request.user:\n\t\t\tform = BookForm(request.POST, instance=book)\n\t\t\tif form.is_valid():\n\t\t\t\tbook = form.save(commit=False)\n\t\t\t\tbook.author = request.user\n\t\t\t\tbook.save()\n\t\t\t\treturn redirect('book_list')\n\t\telse:\n\t\t\tform = BookForm(instance=book)\n\t\t\terror_msg = \"Your are not the auther of this book, only the actual author can edit this book\"\n\t\t\tmessages.error(request, error_msg)\n\telse:\n\t\tform = BookForm(instance=book)\n\n\tmessages.info(request, \"Editing an existing book\")\n\treturn render(request, 'BookManagement/new_book.html', {'form': form})",
"def save_book(self):\n db.session.add(self)\n db.session.commit()",
"def update(self):\n db.session.commit()",
"def update(self):\n db.session.commit()",
"def alter_book(old_name, new_name, book_price):\n book = session.query(Book).filter(Book.book_name == old_name).first()\n if book:\n book.book_name = new_name\n book.book_price = book_price\n return True",
"def test_update_book(self):\n book_information = self.books_from_json[0]\n book_id = '60773a16cb838494e13d3652'\n self.books.update = MagicMock(return_value=None) # success on update\n update_book = self.books.update_details(book_id, self.books_from_json[0])\n self.assertEqual(\"Mock Book updated!\", update_book['flash_message'])",
"def test_update_book(self):\n\n delete_books()\n\n book = create_book(\"title one\")[\"book\"]\n\n with test_client.put(\n \"/book/{}/\".format(book[\"id\"]),\n data={\n \"title\": \"title one updated\"\n }\n ) as response:\n\n self.assertEqual(\n json.loads(\n response.get_data(as_text=True)\n ),\n {\n \"status\": \"success\",\n \"book\": {\n **book,\n \"title\": \"title one updated\"\n }\n }\n )\n\n self.assertEqual(\n read_book(book[\"id\"]),\n {\n \"status\": \"success\",\n \"book\": {\n **book,\n \"title\": \"title one updated\"\n }\n }\n )\n\n \"\"\"\n clear the table, create several books, update them and read them\n \"\"\"\n\n delete_books()\n\n book_one = create_book(\"title one\")[\"book\"]\n book_two = create_book(\"title two\")[\"book\"]\n\n with test_client.put(\n \"/book/{}/\".format(book_one[\"id\"]),\n data={\n \"title\": \"title one updated\"\n }\n ) as response:\n\n self.assertEqual(\n json.loads(\n response.get_data(as_text=True)\n ),\n {\n \"status\": \"success\",\n \"book\": {\n **book_one,\n \"title\": \"title one updated\"\n }\n }\n )\n\n self.assertEqual(\n read_book(book_one[\"id\"]),\n {\n \"status\": \"success\",\n \"book\": {\n **book_one,\n \"title\": \"title one updated\"\n }\n }\n )\n\n with test_client.put(\n \"/book/{}/\".format(book_two[\"id\"]),\n data={\n \"title\": \"title two updated\"\n }\n ) as response:\n\n self.assertEqual(\n json.loads(\n response.get_data(as_text=True)\n ),\n {\n \"status\": \"success\",\n \"book\": {\n **book_two,\n \"title\": \"title two updated\"\n }\n }\n )\n\n self.assertEqual(\n read_book(book_two[\"id\"]),\n {\n \"status\": \"success\",\n \"book\": {\n **book_two,\n \"title\": \"title two updated\"\n }\n }\n )",
"def rent_book(self, bookID):\n query = f\"\"\"UPDATE {TABLE} set quantity = quantity - 1 where bookID = '{bookID}';\"\"\"\n\n try:\n self.cursor.execute(query)\n self.conn.commit()\n except Error as e:\n print(e)",
"def model_update(self, db):\n db.session.commit()",
"def test_edit_book(client, book):\n response = client.post(f\"/bookmarks/book/edit/{book.id}\",\n data=dict(\n header=\"test_header\",\n writer=\"writer\",\n ISBN=123,\n comment=\"comment\"\n ),\n follow_redirects=True)\n assert response.status_code == 200\n assert b\"test_header\" in response.data",
"def change_copies(self, book_id):\n book = Book.query.filter_by(id=book_id).first()\n if book:\n self.copies += 1\n db.session.commit()",
"def edit(request, pk):\n obj = get_object_or_404(Book, id=pk)\n form = BookForm(request.POST or None, request.FILES or None, instance=obj)\n\n if form.is_valid():\n form.save()\n messages.success(request, 'Your password was updated successfully!')\n return redirect('book_list')\n return render(request, 'upload_book.html', {'form': form})",
"def test_api_update_book_id_is_integer(self):\n\n\t\t# create book\n\t\tadd_book = {\n\t\t\t'title': 'Hello Books',\n\t\t\t'isbn': '5698745124'\n\t\t}\n\t\tlogin_data = self.login_test_user()\n\t\ttoken = login_data['auth_token']\n\t\tres = self.client.post(\n\t\t\tf'{URL_BOOKS}',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\tcontent_type='application/json',\n\t\t\tdata=json.dumps(add_book)\n\t\t)\n\t\t# update book\n\t\tbook = self.client.put(\n\t\t\tf'{URL_BOOKS}/1k',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\tcontent_type='application/json',\n\t\t\tdata=json.dumps(\n\t\t\t\tdict(\n\t\t\t\t\ttitle='updated book'\n\t\t\t\t)\n\t\t\t)\n\t\t)\n\n\t\tbook_res = json.loads(book.data.decode())\n\t\tself.assertTrue(book_res['message'] == 'please provide a book id. ID must be integer')",
"def db_update_entry():\n db = sh.open(the_phone_book_name, flag='c', writeback=True)\n name = get_name()\n if name in db:\n phone_number = get_phone_number(db[name.capitalize()])\n print(\"Updating existing entry ..... {name}\\n\".format(name=name))\n db[name.capitalize()] = phone_number\n db.sync()\n else:\n print_error()\n db.close()\n db_show_all()",
"def add_book(self, data):\n exists = self.check_if_exists(data['isbn'])\n\n if exists:\n query = f\"\"\"UPDATE {TABLE} SET quantity = quantity + 10 WHERE bookID = '{data[\"isbn\"]}'\"\"\"\n else:\n query = f\"\"\"INSERT INTO {TABLE}(bookID, title, authors, avg_rating, ratings_count,\n lang_code, num_pages, text_reviews, pub_date, publisher) values(\n \"{data['isbn']}\",\n \"{data['title']}\",\n \"{data['authors']}\",\n {float(data['average_rating'])},\n {int(data['ratings_count'])},\n \"{data['language_code']}\",\n {int(data[' num_pages'])},\n {int(data['text_reviews_count'])},\n \"{data['publication_date']}\",\n \"{data['publisher']}\"\n );\"\"\"\n\n try:\n self.cursor.execute(query)\n self.conn.commit()\n except Error as e:\n print(e)",
"def update_by_id(cls, id, name, surname):\n\t\tauthor = Author.query.get(id)\n\t\tauthor.name = name\n\t\tauthor.surname = surname\n\t\tdb.session.commit()",
"def update(self, instance, validated_data):\n instance.user = validated_data.get('user', instance.user)\n instance.book_id = validated_data.get('book_id', instance.book_id)\n instance.save()\n return instance"
] | [
"0.7573654",
"0.72800773",
"0.724804",
"0.71230936",
"0.7046375",
"0.70066255",
"0.6929183",
"0.6876896",
"0.6876203",
"0.68481857",
"0.6840768",
"0.6555312",
"0.6484862",
"0.6337583",
"0.6248639",
"0.62454295",
"0.62454295",
"0.6211745",
"0.6150676",
"0.61380297",
"0.61138356",
"0.60077894",
"0.59998834",
"0.5987418",
"0.59685236",
"0.5960455",
"0.59345114",
"0.5875217",
"0.58450973",
"0.58092135"
] | 0.7472767 | 1 |
Create a UI from a SQL table and return a ``Panel`` | def create_tables(name, role, doc, options, connection):
if role == 'data_samples':
print(f'create data_samples={name}')
data_table = TabsDynamicData(doc, options, connection, name, role, creator_fn=process_data_samples)
panel = Panel(child=data_table.get_ui(), title=name, name=f'data_samples_{name}_main_panel')
elif role == 'data_tabular':
print(f'create data_tabular={name}')
data_table = TabsDynamicData(doc, options, connection, name, role, creator_fn=process_data_tabular)
panel = data_table.get_ui()
elif role == 'data_graph':
print(f'create data_graph={name}')
data_table = TabsDynamicData(doc, options, connection, name, role, creator_fn=process_data_graph)
panel = data_table.get_ui()
else:
raise NotImplementedError(f'role not implemented={role}')
assert isinstance(panel, Panel), f'unexpected type={type(panel)}'
# handle the table preamble here
metadata_name = get_metadata_name(name)
metadata = get_table_data(connection, metadata_name)
preamble = metadata.get('table_preamble')
if preamble is not None and len(preamble[0]) > 0:
# we have a preamble for our table. Add a ``Div`` widget to y_axis the preamble
child = column(Div(text=preamble[0]), panel.child, sizing_mode='stretch_both')
panel.update(child=child)
print(f'table={name} created!')
return panel | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def makeTableWidget(self):\n from collective.table.browser.table import TableWidget\n context = self.portal.table\n widget = TableWidget(context, None)\n widget.fieldName = 'table'\n return widget",
"def create_panel(self):\n return\n # return Panel(self)",
"def initUI(self):\n centralwidget = QtWidgets.QWidget()\n self.setCentralWidget(centralwidget)\n\n person_table = PersonTable(self)\n person_table.embed(self.sql)\n\n # Box Layout to organize our GUI\n lay = QtWidgets.QVBoxLayout(centralwidget)\n lay.addWidget(person_table)\n self.setGeometry(0, 0, person_table.width() + 20, person_table.height() + 20)\n self.person_table = person_table\n self.show()",
"def create_widget(self):\n self.widget = QTableView(self.parent_widget())\n self.widget.setAttribute(Qt.WA_StaticContents, True)",
"def generate_gui():\n layout = [\n [sg.Text(\"Load from file: \", size=(40, 1),\n font=(38,), key=\"-LOAD_STATUS-\")],\n [sg.InputText(\"\"), sg.FileBrowse(\"Browse\")],\n [sg.Button(\"Load\")],\n [sg.Text(\"Query:\"), sg.Text(\"\", size=(40, 1), key=\"-OUTPUT-\")],\n [sg.Input(key=\"-QUERY-\")],\n [sg.Button(\"Output - CSV\"), sg.Button(\"Output - Images\"), sg.Exit()],\n ]\n\n window = sg.Window(\"MTG SQL Interface\", layout)\n\n while True:\n event, values = window.Read()\n if event in (None, \"Exit\"):\n break\n\n if event == \"Load\":\n # load file into Deck object & database\n if values[\"Browse\"] != \"\":\n p = Path(values[\"Browse\"])\n _, db = file_to_db(p, window)\n window[\"-QUERY-\"].Update(f\"\"\"SELECT * FROM {p.stem} WHERE\"\"\")\n\n if event == \"Output - CSV\":\n # query database, store output as csv\n p = Path(values[\"Browse\"]).stem + \".db\"\n d = query_db(p, values[\"-QUERY-\"], output=\"text\")\n window[\"-OUTPUT-\"].Update(\"written to CSV\")\n\n if event == \"Output - Images\":\n # query database, download corresponding images\n p = Path(values[\"Browse\"]).stem + \".db\"\n d = query_db(p, values[\"-QUERY-\"], output=\"images\")\n window[\"-OUTPUT-\"].Update(\"images saved\")\n\n window.Close()",
"def create_page(self, parent):\n ui = self.edit_traits(parent=parent, kind=\"subpanel\")\n return ui.control",
"def createWidget(self):\n figure = Figure(figsize=(4,2), dpi=100)\n \"\"\"Figure size is measured in inches.\"\"\"\n graph = figure.add_subplot(111)\n \"\"\"The default subplot, which creates one row, one column, with index one.\"\"\"\n graph.plot(self.wave_table[0], self.wave_table[1])\n\n canvas = FigureCanvasTkAgg(figure, self.master)\n canvas.draw()\n canvas.get_tk_widget().pack(fill=tk.BOTH, expand=True)",
"def _buildUi(self):\r\n for r, row_data in enumerate(self._data):\r\n self.insertRow(r)\r\n for c, column_data in enumerate(row_data):\r\n if r == 0:\r\n self.insertColumn(c)\r\n item = QtGui.QTableWidgetItem(column_data)\r\n self.setItem(r, c, item)",
"def create_panel(self):\n # Main Frame creation\n frame1 = Frame(self.window)\n frame1.pack(fill=\"both\")\n tablayout = Notebook(frame1)\n \n ##### TRACKER #####\n tab = Frame(tablayout) # creating 1st nested frame\n tab.pack(fill=\"both\")\n table = Frame(tab)\n table.pack(fill=\"both\")\n self.show_table(self.t.timeline[\"week\" + str(self.week)], table) # Grids the week with data\n self.add_buttons(tab, table)\n tablayout.add(tab, text=\"Current Week\") \n \n \n ##### STATS #####\n tab = Frame(tablayout) # creating 2nd nested frame\n tab.pack(fill=\"both\")\n self.stats.create_canvas(tab)\n\n\n # once its packed you can add it to the window object under a title\n tablayout.add(tab, text=\"Statistics\") \n tablayout.pack(fill=\"both\") # once everything is done now you pack the tablayout",
"def generate_panel(self):\r\n \r\n self.PanelData = self.RawData.filter(['ID', 'X', 'Z', 'W', 'R', 'β', 'LFP', 'H'], axis=1)",
"def create_widgets( self ):",
"def __init__(self, parent=None, sql=None):\n # QtWidgets.__init__(self, parent)\n super().__init__(parent)\n self.sql = sql\n uic.loadUi(\"./ui/person_widget.ui\", self)\n\n # returns table has 9 columns but not showing last (pid)\n self.person_columns = [\n \"fullname\",\n \"lunaid\",\n \"age\",\n \"dob\",\n \"sex\",\n \"lastvisit\",\n \"maxdrop\",\n \"studies\",\n ]\n\n # current selection\n self.row_i = -1\n self.NoDropCheck = None # maybe menu checkbox\n self.luna_search_settings = None # will be actiongroup\n\n self.setup_table()\n self.setup_textboxes()\n self.setup_right_click()\n self.setup_add_person()\n\n # trigger model for editting person info\n self.EditPeople = EditPeople.EditPeopleWindow(self)\n self.EditPeople.accepted.connect(self.change_person_to_db)",
"def __init__(self, window, database):\r\n super().__init__()\r\n self.window = window\r\n self.mainLayout = QGridLayout()\r\n # Creating the Widgets\r\n self.createLabels(database)\r\n self.createLineWidgets()\r\n self.createButtons()\r\n # Adding the Table widget in function of the database name\r\n self.addTable(database)\r\n # Connecting the buttons with their function\r\n self.buttons[\"REFRESH TABLE\"].clicked.connect(\r\n partial(self.addTable, database))\r\n self.buttons[\"SAVE\"].clicked.connect(\r\n partial(self.addItem, database))\r\n self.buttons[\"DELETE\"].clicked.connect(\r\n partial(self.deleteRow, database))\r\n self.buttons[\"ADD NULL DAY\"].clicked.connect(\r\n partial(self.addingNull, database))",
"def create_widgets(self):",
"def select_table(self):\n\n selected = self.mylist.selection_get()\n data = self.read_table(selected)\n db_frame = self.db_frame\n\n db_frame.pack(side=\"left\", fill=\"both\")\n col_names = tuple((\"heading%d\" % i for i in range(len(data[0]))))\n if not self.Tree:\n self.Tree = Treeview(db_frame, columns=col_names)\n else:\n self.Tree.destroy()\n self.scrollbarY.destroy()\n self.scrollbarX.destroy()\n self.Tree = Treeview(db_frame, columns=col_names)\n self.scrollbarY = Scrollbar(db_frame)\n self.scrollbarX = Scrollbar(db_frame, orient=HORIZONTAL)\n self.Tree.config(yscrollcommand=self.scrollbarY.set,\n xscrollcommand=self.scrollbarX.set)\n\n for x in data:\n self.Tree.insert('', 'end', values=x)\n for col in col_names:\n self.Tree.heading(col, text=col)\n self.scrollbarY.config(command=self.Tree.yview)\n self.scrollbarY.pack(side='right', fill=Y)\n self.scrollbarX.config(command=self.Tree.xview)\n self.scrollbarX.pack(side='bottom', fill=X)\n self.Tree.pack(side='left', fill='both')",
"def __init__(self, plot_factory, df, title=\"Graph\"):\n super().__init__(title=title)\n self.Table = Table(plot_factory, df, \"Show Table\")\n self.totalButtons = 10\n self.plot_factory = plot_factory\n self.df = df\n self.IdTitlePair = [\"id\", \"title\"]",
"def create_widget(self, parent, tree):\n widget = wx.Panel(parent)\n sizer = wxSingleWidgetSizer()\n widget.SetSizer(sizer)\n return widget",
"def ui(self):\n return ui",
"def _make_sliders_and_tables(self, df):\n if not len(df):\n raise ValueError('DataFrame must be at least one entry long.')\n\n self.evt_sel_slid = pn.widgets.IntSlider(value=0,\n start=0,\n end=len(df))\n self._make_tables(df)\n\n # Define callbacks for tables:\n self.evt_sel_slid.param.watch(lambda event: table_callback(self.time_table,\n self.df_event_time,\n event, True),\n 'value')\n self.evt_sel_slid.param.watch(lambda event: table_callback(self.prop_table,\n self.df_event_properties,\n event),\n 'value')\n self.evt_sel_slid.param.watch(lambda event: table_callback(self.pos_table,\n self.df_event_position,\n event),\n 'value')\n\n # Now make title and also define callback:\n title = self._make_title(self.evt_sel_slid.value)\n self.title_panel = pn.panel(title, sizing_mode='scale_width')\n\n def title_callback(event):\n self.title_panel.object = self._make_title(event.new)\n\n self.evt_sel_slid.param.watch(title_callback, 'value')",
"def createUI(self):\n\n q.getQItem(windowID, QtWidgets.QWidget)\n cmds.setParent(q.fullPath)\n\n # ################################################\n # Active Render Layer\n\n # cmds.separator(height=12, style='none')\n addFrameLayout(\n '%s_frameLayoutLayers' % windowID,\n 'Visible Render Layer', collapsable=False,\n labelVisible=False,\n marginHeight=0\n )\n\n addRowLayout(\n '%s_rowLayoutActiveRenderLayer' % windowID,\n 4,\n columnAlign4=('left', 'left', 'right', 'right'),\n columnAttach4=('left', 'both', 'right', 'right'),\n columnWidth4=(\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.775,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075\n )\n )\n\n\n addButton('%s_addNewLayer' % windowID, 'New', rsAddNewLayer,\n image='RS_create_layer', size=(21, 21))\n addOptionMenu('%s_selectActiveLayer' % windowID,\n 'Active Layer ', (), rsSelectActiveLayer)\n addButton('rsOpenRenderSetupWindow', 'Render Setup',\n rsOpenRenderSetupWindow, image='render_setup.png',\n size=(21, 21))\n addButton('rsOpenUnifiedRenderGlobals', 'Render Globals',\n rsOpenUnifiedRenderGlobals, image='render_setup.png',\n size=(21, 21))\n\n # ################################################\n # Work Render Layers\n\n cmds.setParent(q.fullPath)\n addFrameLayout('%s_frameLayoutLayersB' % windowID,\n 'Work Render Layer', collapsable=False,\n labelVisible=False, marginHeight=0)\n addRowLayout('%s_rowLayoutVisibleRenderLayer' % windowID, 3,\n columnAlign3=('left', 'left', 'right'),\n columnAttach3=('left', 'both', 'right'),\n columnWidth3=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.075, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.85,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075))\n\n cmds.separator()\n addOptionMenu('%s_selectVisibleLayer' % windowID,\n 'Visible Layer ', (), rsSelectVisibleLayer)\n cmds.separator()\n\n cmds.setParent(q.fullPath)\n cmds.separator(height=12, style='none')\n\n # ################################################\n # Collections\n\n addFrameLayout('%s_frameLayout02' % windowID, 'Collections',\n labelVisible=False, marginHeight=0)\n\n addRowLayout(\n '%s_rowLayout02' % windowID,\n 6,\n columnAlign6=('left', 'left', 'left', 'left', 'left', 'left'),\n columnAttach6=('both', 'both', 'right', 'right', 'right', 'right'),\n columnWidth6=(\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.18,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.18,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.415,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075,\n )\n )\n\n addButton('rsAddCollection', 'Add', rsAddCollection)\n addButton('rsRemoveCollection', 'Remove', rsRemoveCollection)\n addButton('rsSelectShapes', 'Select Shapes', rsSelectShapes,\n image='selectObject.png', size=(21, 21))\n addButton('rsRenameShader', 'Rename Shader', rsRenameShader,\n size=(21, 21), image='QR_rename.png')\n addButton('rsDuplicateShader', 'Duplicate Shader',\n duplicateShader, size=(21, 21), image='newPreset.png')\n addButton('rsRefreshUI', 'Refresh', rsRefreshUI, size=(21, 21),\n image='QR_refresh.png')\n\n # ###########################\n # Filter List\n\n cmds.setParent('%s_frameLayout02' % windowID)\n addRowLayout('%s_rowLayout03' % windowID, 2,\n columnAlign2=('left', 'left'),\n columnAttach2=('both', 'both'),\n columnWidth2=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.6, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.42))\n\n addTextField('%s_filterShaderList' % windowID, 'Search',\n rsFilterShaderList_off, 
rsFilterShaderList_off,\n window.updateUI)\n addOptionMenu('rsShaderGroups', '|', (), rsShaderGroups)\n\n # ###########################\n # The shaders scroll list\n\n cmds.setParent('%s_frameLayout02' % windowID)\n addRowLayout('%s_rowLayout04' % windowID, 1, columnAlign1='both', columnAttach1='both', columnWidth1=WINDOW_WIDTH\n + 12)\n addTextScrollList('%s_ShaderScrollList' % windowID, (),\n rsShaderScrollList_doubleClick,\n rsShaderScrollList_onSelect,\n rsShaderScrollList_deleteKey)\n\n # Add popup menu:\n\n cmds.popupMenu('rsShaderScrollListPopupMenu',\n parent='%s_ShaderScrollList' % windowID,\n allowOptionBoxes=False, markingMenu=True,\n postMenuCommand=postMenuCommand)\n cmds.menuItem('%s_popupMenuItem02' % windowID,\n label='Duplicate Shader', command=duplicateShader)\n cmds.menuItem(divider=True)\n cmds.menuItem('%s_popupMenuItem04' % windowID,\n label='Graph Shader')\n cmds.menuItem(divider=True)\n cmds.menuItem('%s_popupMenuItem03' % windowID,\n label='Select Shader')\n cmds.menuItem(divider=True)\n cmds.menuItem('%s_popupMenuItem05' % windowID,\n label='Select Assigned Shapes')\n cmds.menuItem('%s_popupMenuItem06' % windowID,\n label='Select Assigned Transforms')\n\n # ##################################################\n # Arnold Property Overrides\n\n cmds.setParent('%s_frameLayout02' % windowID)\n cmds.columnLayout(\n '%s_columnLayout20' % windowID,\n width=WINDOW_WIDTH - FRAME_MARGIN * 2,\n columnAlign='left',\n columnAttach=('left', 0),\n adjustableColumn=False,\n rowSpacing=0,\n )\n\n cmds.separator(parent='%s_columnLayout20' % windowID, height=4,\n style='none')\n\n addRowLayout('%s_rowLayout05' % windowID, 2,\n columnAlign2=('left', 'both'),\n columnAttach2=('left', 'right'),\n columnWidth2=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.75, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.25))\n addText('%s_textArnoldPropertyOverridesLabel' % windowID,\n 'Apply Arnold Property Overrides', 'plainLabelFont')\n addCheckBox('rsArnoldPropertyOverridesCheckBox', '',\n rsArnoldPropertyOverridesCheckBox,\n rsArnoldPropertyOverridesCheckBox)\n cmds.separator(parent='%s_columnLayout20' % windowID, height=4,\n style='none')\n\n # Column Layout to toggle\n\n cmds.setParent('%s_columnLayout20' % windowID)\n cmds.columnLayout(\n '%s_columnLayout02' % windowID,\n width=WINDOW_WIDTH - FRAME_MARGIN * 2,\n columnAlign='left',\n columnAttach=('left', 0),\n adjustableColumn=False,\n rowSpacing=0,\n )\n\n addCheckboxes('%s_columnLayout02' % windowID)\n cmds.columnLayout('%s_columnLayout02' % windowID, edit=True,\n visible=False)\n\n # #################################################\n # Shader Override\n\n cmds.setParent('%s_frameLayout02' % windowID)\n cmds.columnLayout(\n '%s_columnLayout21' % windowID,\n width=WINDOW_WIDTH - FRAME_MARGIN * 2,\n columnAlign='left',\n columnAttach=('left', 0),\n adjustableColumn=False,\n rowSpacing=0,\n )\n cmds.separator(parent='%s_columnLayout21' % windowID, height=4,\n style='none')\n addRowLayout('%s_rowLayout06' % windowID, 2,\n columnAlign2=('left', 'right'),\n columnAttach2=('left', 'right'),\n columnWidth2=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.75, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.25))\n addText('%s_shaderOverrideLabel' % windowID, 'Shader Override',\n 'plainLabelFont')\n addCheckBox('%s_shaderOverrideCheckbox' % windowID, '',\n rsShaderOverrideCheckbox, rsShaderOverrideCheckbox)\n cmds.separator(parent='%s_columnLayout21' % windowID, height=4,\n style='none')\n\n cmds.setParent('%s_columnLayout21' % windowID)\n cmds.columnLayout(\n '%s_columnLayout03' % 
windowID,\n width=WINDOW_WIDTH - FRAME_MARGIN * 2,\n columnAlign='left',\n columnAttach=('both', 4),\n adjustableColumn=True,\n rowSpacing=0,\n )\n cmds.setParent('%s_columnLayout03' % windowID)\n addOptionMenu('%s_optionMenu02' % windowID, 'Select: ', (),\n rsShaderOverridesMenu)\n\n global selectedShaderOverride\n\n # default selection\n\n selectedShaderOverride = SHADER_OVERRIDE_OPTIONS[0]['ui']\n cmds.columnLayout('%s_columnLayout03' % windowID, edit=True,\n visible=False)\n\n # #################################################\n\n cmds.setParent(q.fullPath)\n cmds.separator(height=10, style='none')\n\n # #################################################\n # Extras\n\n addFrameLayout('%s_frameLayout50' % windowID, 'Extras',\n collapsable=True, marginHeight=0,\n labelVisible=False)\n\n # #################################################\n # Add & Assign Shader Groups\n\n addFrameLayout(\n '%s_frameLayout05' % windowID,\n 'Add & Assign Shader Groups',\n collapsable=True,\n marginWidth=0,\n marginHeight=0,\n collapse=False,\n labelVisible=True,\n )\n\n # Add the renamer window\n\n self.gwCustomRenamer = CustomRenamer()\n self.gwCustomRenamer.createUI()\n\n # #################################################\n # AutoConnect\n\n cmds.setParent('%s_frameLayout50' % windowID)\n\n addFrameLayout(\n '%s_frameLayout03' % windowID,\n 'Adobe Connector',\n collapsable=True,\n marginWidth=0,\n marginHeight=0,\n collapse=True,\n labelVisible=True,\n )\n addRowLayout('%s_rowLayout07', 3, columnAlign3=('left', 'left',\n 'left'), columnAttach3=('both', 'both', 'both'),\n columnWidth3=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.4, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.3,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.3))\n addButton('updateConnections', '> Update Connections <',\n updateConnections)\n addButton('uvSnapshot', 'UV Snapshot', uvSnapshot)\n addButton('editTexture', 'Edit Texture', editTexture)\n\n # After Effects\n\n cmds.setParent('%s_frameLayout03' % windowID)\n addRowLayout('%s_rowLayout11' % windowID, 2,\n columnAlign2=('left', 'left'),\n columnAttach2=('both', 'both'),\n columnWidth2=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.4, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.6))\n addText('%s_text90' % windowID, 'Send to After Effects:')\n addButton('makeCompButton', 'Send to After Effects', rsMakeComp)\n\n # #################################################\n # Render Setup /\n # Output settings\n\n cmds.setParent('%s_frameLayout50' % windowID)\n addFrameLayout(\n '%s_frameLayout04' % windowID,\n 'Output Settings',\n collapsable=True,\n marginWidth=0,\n marginHeight=0,\n collapse=True,\n labelVisible=True,\n )\n addRowLayout('%s_rowLayout08' % windowID, 1,\n columnAlign1='center', columnAttach1='both',\n columnWidth1=WINDOW_WIDTH - FRAME_MARGIN * 2)\n addButton('%s_revealOutputDirectory' % windowID,\n 'Output path not set yet', rsRevealOutputDirectory)\n\n cmds.setParent('%s_frameLayout04' % windowID)\n addRowLayout('%s_rowLayout09' % windowID, 3,\n columnAlign3=('left', 'right', 'right'),\n columnAttach3=('left', 'right', 'right'),\n columnWidth3=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.8, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.14,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.06))\n\n addOptionMenu('%s_optionMenu05' % windowID, '', (),\n rsSelectOutputTemplate)\n addOptionMenu('%s_outputVersionMenu' % windowID, '', (),\n rsSelectOutputVersion)\n cmds.menuItem(label='v001')\n\n cmds.setParent('%s_rowLayout09' % windowID)\n addButton('%s_incrementOutputVersionButton' % windowID, '+1',\n rsIncrementOutputVersion, 
size=(21, 21))\n\n cmds.setParent('%s_frameLayout04' % windowID)\n addRowLayout('%s_rowLayout10' % windowID, 2,\n columnAlign2=('left', 'left'),\n columnAttach2=('both', 'right'),\n columnWidth2=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.7, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.3))\n addOptionMenu('%s_optionMenu03' % windowID, 'Format:', (),\n rsOutputTemplatesMenu)\n addOptionMenu('%s_optionMenu06' % windowID, '', (),\n rsSetFPSMenu)\n\n cmds.setParent('%s_frameLayout04' % windowID)\n addRowLayout('%s_rowLayout12' % windowID, 4,\n columnAlign4=('right', 'left', 'right', 'left'),\n columnAttach4=('both', 'both', 'both', 'both'),\n columnWidth4=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.50, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.15,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.20,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.15))\n\n addText('%s_setInFrameLabel' % windowID, 'In Frame ')\n addTextField('%s_setInFrame' % windowID, '', setInFrame,\n setInFrame, setInFrame)\n\n addText('%s_setOutFrameLabel' % windowID, 'Out Frame ')\n addTextField('%s_setOutFrame' % windowID, '', setOutFrame,\n setOutFrame, setOutFrame)",
"def showUI(cls):\r\n win = cls()\r\n win.create()\r\n return win",
"def panel(*args, control: bool=True, copy: AnyStr=\"\", createString: bool=True, defineTemplate:\n AnyStr=\"\", docTag: Union[AnyStr, bool]=\"\", editString: bool=True, exists: bool=True,\n init: bool=True, isUnique: bool=True, label: Union[AnyStr, bool]=\"\",\n menuBarRepeatLast: bool=True, menuBarVisible: bool=True, needsInit: bool=True,\n parent: AnyStr=\"\", popupMenuProcedure: Union[Script, bool]=None, replacePanel:\n AnyStr=\"\", tearOff: bool=True, tearOffCopy: AnyStr=\"\", tearOffRestore: bool=True,\n unParent: bool=True, useTemplate: AnyStr=\"\", q=True, query=True, e=True, edit=True,\n **kwargs)->Union[None, Any]:\n pass",
"def submitQuery(self):\r\n #open connection, retrieve cursor and execute query\r\n\r\n try:\r\n connection = pymysql.connect(host='localhost', user='root',\r\n password='user', db='bank',\r\n charset='utf8mb4',\r\n cursorclass=pymysql.cursors.DictCursor)\r\n\r\n cursor = connection.cursor()\r\n cursor.execute(self.query.get())\r\n except (pymysql.OperationalError):\r\n errorMessage = \"Error\"\r\n showerror(\"Error\", errorMessage)\r\n return\r\n\r\n else: #obtain user requested info\r\n data = cursor.fetchall()\r\n fields = cursor.description\r\n cursor.close()\r\n connection.close()\r\n\r\n # clear results of last query\r\n self.panes.destroy()\r\n self.panes = Pmw.PanedWidget(self.frame.interior(),\r\n orient = \"vertical\")\r\n self.panes.pack(expand = YES, fill = BOTH)\r\n\r\n # create panel and label for each field\r\n for item in fields:\r\n## print(item)\r\n self.panes.add(item[0])\r\n label = Label(self.panes.pane(item[0]),\r\n text = item[0], relief = RAISED)\r\n label.pack(fill = X)\r\n\r\n # enter results into panes using labels\r\n for entry in data:\r\n\r\n for i in range(len(entry)):\r\n## print(entry)\r\n label = Label(self.panes.pane(fields[i][0]),\r\n text = entry[fields[i][0]], anchor = W, \r\n relief = GROOVE, bg = \"white\")\r\n label.pack(fill = X)\r\n\r\n self.panes.setnaturalsize()",
"def table_builder(request):\n kind=request.param\n def _builder(data, columns):\n if kind==\"array\":\n return np.array(data)\n elif kind==\"table\":\n return DataTable(data,columns,transposed=False)\n else:\n return pd.DataFrame(data,columns=columns)\n _builder.kind=kind\n return _builder",
"def TableFieldWidget(field, request):\n return widget.FieldWidget(field, TableWidget(request))",
"def get_panels(config):\n\n task = TaskPanels(config)\n task.execute()\n\n task = TaskPanelsMenu(config)\n task.execute()\n\n logging.info(\"Panels creation finished!\")",
"def create_ui(self, parent):\n view = View(\n Item(name=\"text\",\n show_label=False,\n editor=ImageTraitEditor(\n image=ImageResource(self.obj.name,\n search_path=[self.obj.parent.absolute_path]) )),\n id=\"puddle.image_editor.image_editor\",\n kind=\"live\", resizable=True)\n\n ui = self.edit_traits(view=view, parent=parent, kind=\"subpanel\")\n\n return ui",
"def create_widget(self):\n pass",
"def create_win_create_table():\n global data_base, table, enter_table_name, win_create_table, enter_column_values\n if data_base == '':\n mistake_load_table()\n else:\n win_create_table = Toplevel(root)\n win_create_table.title(\"Ввод\")\n win_create_table.geometry('320x150')\n win_create_table.resizable(height=False, width=False)\n enter_table_name = Entry(win_create_table)\n enter_table_name.place(x=15, y=40, width=210)\n lbl2 = Label(win_create_table, text=\"Введите название таблицы: \")\n lbl2.place(x=10, y=10)\n enter_data_button = ttk.Button(win_create_table, text=\" Ввод \", command=get_create_data)\n enter_data_button.place(x=250, y=70)\n lbl3 = Label(win_create_table, text=\"Введите через запятую: Название книги, \\nИмя автора и Год публикации\")\n lbl3.place(x=10, y=70)\n enter_column_values = Entry(win_create_table)\n enter_column_values.place(x=15, y=110, width=200)",
"def getWidget(self):\n \n firstDataset = DashboardDataset.objects.filter(visualisation=self)[0]\n \n widget = {'name': self.name,\n 'id': \"vis\" + str(self.pk),\n 'pk': self.pk,\n 'category': self.category.name,\n 'type': self.type,\n 'dataset': [json.loads(d.dataJSON, cls=util.DateTimeDecoder) for d in DashboardDataset.objects.filter(visualisation=self)],\n 'datasetLabels': [d.name for d in DashboardDataset.objects.filter(visualisation=self)],\n 'sourceName': self.dataSource.name,\n 'sourceLink': self.dataSource.link,\n 'datasetName': firstDataset.name,\n 'datasetLink': firstDataset.link,\n 'description': self.description,\n 'xLabel': self.xLabel,\n 'yLabel': self.yLabel,\n 'sizeX': self.sizeX,\n 'sizeY': self.sizeY}\n return widget"
] | [
"0.6599751",
"0.60347813",
"0.6001966",
"0.59753203",
"0.5832454",
"0.58038557",
"0.5691837",
"0.568478",
"0.558809",
"0.55831575",
"0.5527184",
"0.5521518",
"0.55198944",
"0.5481038",
"0.5476279",
"0.54733795",
"0.5462939",
"0.54569745",
"0.54461217",
"0.54309857",
"0.5420449",
"0.54129463",
"0.5400823",
"0.5386913",
"0.5382899",
"0.53809905",
"0.5371665",
"0.5366367",
"0.5360458",
"0.5348229"
] | 0.62888616 | 1 |
Get the next update from the queue. If no update is found, block the process until one is received. If a stop signal is sent, try to gracefully stop the thread. | def __receive_next_update(self) -> telegram.Update:
# Pop data from the queue
try:
data = self.queue.get(timeout=self.cfg.telegram["conversation_timeout"])
except queuem.Empty:
# If the conversation times out, gracefully stop the thread
self.__graceful_stop(StopSignal("timeout"))
# Check if the data is a stop signal instance
if isinstance(data, StopSignal):
# Gracefully stop the process
log.debug("Waiting for a specific message...")
self.__graceful_stop(data)
# Return the received update
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def receive_next_update(self) -> telegram.Update:\n # Pop data from the queue\n data = \"\"\n try:\n data = self.queue.get(timeout=self.cfg.telegram[\"conversation_timeout\"])\n except queuem.Empty:\n # If the conversation times out, gracefully stop the thread\n self.__graceful_stop(StopSignal(\"timeout\"))\n # Check if the data is a stop signal instance\n if isinstance(data, StopSignal):\n # Gracefully stop the process\n log.debug(\"Waiting for a specific message...\")\n self.__graceful_stop(data)\n # Return the received update\n return data",
"def next_ele(self):\n\t\ttry:\n\t\t\tret = self._queue.get(block = True, timeout=0.5)\n\t\t\tself._queue.task_done()\n\t\t\treturn ret\n\t\texcept queue.Empty:\n\t\t\tif not self.is_running():\n\t\t\t\traise\n\t\t\telse:\n\t\t\t\treturn None",
"def get_updates(self):\n if update_queue:\n return update_queue.pop()",
"def _wait_queue(self):\n while True:\n time.sleep(0.1)\n if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():\n return",
"def next_ele(self):\n\t\ttry:\n\t\t\tret = self._queue.get(block=True, timeout=0.5)\n\t\t\tself._queue.task_done()\n\t\t\treturn ret\n\t\texcept queue.Empty:\n\t\t\tif not self.isAlive():\n\t\t\t\traise\n\t\t\telse:\n\t\t\t\treturn None",
"def pop_one(self):\n with self.lock:\n if self.please_stop:\n return [THREAD_STOP]\n elif not self.queue:\n return None\n else:\n v =self.queue.pop()\n if v is THREAD_STOP: # SENDING A STOP INTO THE QUEUE IS ALSO AN OPTION\n self.please_stop.go()\n return v",
"def get(self):\n with self.__lock:\n while True:\n try:\n job = self.__queue.get(False)\n self.__lock.notify_all()\n return job\n except Queue.Empty:\n self.__lock.wait()",
"def next(self):\n while True: # waiting\n item = self.get_next_if_any()\n if item is not None: # feature: value None is filtered out\n return item\n\n if self.nomore: # if nothing else is coming\n break # stop waiting\n\n time.sleep(0.1) # wait before checking again\n\n raise StopIteration() # tell next worker nothing else is coming",
"def next(self):\n try:\n return self.queue.get()\n except Empty:\n raise StopIteration",
"def run(self):\n while True:\n # Check to see if we should stop\n if self._stop.isSet():\n logger.debug(\"Worker thread stopping.\")\n break\n\n # Try to pull from the queue\n try:\n func, args, kwargs = self.queue.get_nowait()\n func(*args, **kwargs)\n except Queue.Empty:\n time.sleep(5)\n continue\n except Exception as e:\n logger.exception(e)",
"def get(self):\n while self.is_running():\n try:\n inputs = self.queue.get(block=True, timeout=5).get()\n if self.is_running():\n self.queue.task_done()\n if inputs is not None:\n yield inputs\n except queue.Empty:\n pass\n except Exception as e: # pylint: disable=broad-except\n self.stop()\n raise e",
"def run(self) -> None:\n\n while True:\n try:\n input_element = self.input_queue.get_nowait()\n self.process(input_element)\n except Empty:\n return",
"def get_next_cell(self):\r\n if len(self.queue) == 0:\r\n return\r\n val = self.peek_queue()\r\n self.queue.remove(val)\r\n return val[0]",
"async def get_next(self) -> Probe:\n schedule: Optional[Schedule] = None\n while schedule is None:\n try:\n # Try to get the earliest scheduled probe\n schedule = self.queue[0]\n except IndexError:\n # If there is none, wait for a change\n async with self.queue_changed:\n await self.queue_changed.wait()\n else:\n # Wait until it's time to run the scheduled probe\n with trio.move_on_at(schedule.next_time):\n # However, if the queue changes before it's time to run,\n # we forget the selected schedule to re-elect a new one.\n async with self.queue_changed:\n await self.queue_changed.wait()\n schedule = None\n # Just before running it, check if it's not actually removed\n if schedule is not None and schedule.removed:\n heapq.heappop(self.queue)\n schedule = None\n # Immediately reschedule the next run of the selected probe\n schedule.advance()\n heapq.heapreplace(self.queue, schedule)\n # Then let the caller actually run the elected probe\n return schedule.probe",
"def my_consumer(q):\n while True:\n data = q.get()\n print('data found to be processed: {}'.format(data))\n processed = data * 2\n print(processed)\n\n if data is sentinel:\n break",
"def q_get(self):\n while not self.stopped():\n try:\n return self.in_q.get(timeout=self.heart_beat)\n except queue.Empty:\n pass",
"def run(self):\n while True:\n next_task = self.task_queue.get()\n if next_task is None:\n # Poison pill means shutdown\n self.task_queue.task_done()\n break\n # Fetch answer from task\n answer = next_task()\n self.task_queue.task_done()\n # Put into result queue\n self.result_queue.put(answer)\n return",
"def __update(self):\n player = MediaPlayer(self.video_path) if self.play_audio else None\n # keep looping infinitely\n while True:\n # if the thread indicator variable is set, stop the\n # thread\n if self.stopped:\n break\n\n # otherwise, ensure the queue has room in it\n if not self.Q.full():\n # read the next frame from the file\n (grabbed, frame) = self.stream.read()\n\n # if the `grabbed` boolean is `False`, then we have\n # reached the end of the video file\n if not grabbed:\n self.stopped = True\n\n # if there are transforms to be done, might as well\n # do them on producer thread before handing back to\n # consumer thread. ie. Usually the producer is so far\n # ahead of consumer that we have time to spare.\n #\n # Python is not parallel but the transform operations\n # are usually OpenCV native so release the GIL.\n #\n # Really just trying to avoid spinning up additional\n # native threads and overheads of additional\n # producer/consumer queues since this one was generally\n # idle grabbing frames.\n if self.transform:\n frame = self.transform(frame)\n\n # add the frame to the queue\n self.Q.put(frame)\n else:\n time.sleep(0.1) # Rest for 10ms, we have a full queue\n if player is not None:\n player.close_player()\n self.stream.release()",
"def run(self):\n while self._update_func():\n self.update_signal.emit(None)",
"def run(self):\n while True: # make sure to run at least once before exiting\n with self._lock:\n self._update(self._data)\n if self._done:\n break\n time.sleep(1)",
"def processWork(self):\n while self.running == True:\n if len(self.work_queue) == 0:\n self.work_queue = [Instruction('Do Math'), Instruction('Send HUPD'), Instruction('Receive All HUPDs')]\n else:\n instruction = self.work_queue.pop(0)\n if instruction.type == 'Do Math':\n #start calculations\n self.updated = False\n #print('Doing Math')\n # run calculations\n elif instruction.type == 'Send HUPD':\n #echo host update to all other hosts on the network\n min_max = str(self.x_min) + ':' + str(self.x_max)\n payload = 'a' + '\\0' + 'b' + '\\0' + 'c' + '\\0' + 'd' + '\\0' + 'e' + '\\0' + 'f' + '\\0' + 'g' + '\\0' + min_max + '\\0'\n our_update = Message(\"HUPD\", self.ip, payload)\n #if there are no connections, send to myself\n for connection in self.connections:\n connection.host_sock.sendall(our_update.generateByteMessage())\n elif instruction.type == 'Receive All HUPDs':\n # make sure to receive all HUPDs from listening threads\n if len(self.connections) > 0:\n while len(self.updates_received) != len(self.connections):\n msg = 'wait'\n # only set to true once all updates have been received\n self.updated = True\n self.updates_received = []\n # Once all updates are recieved update ABoid locations\n self.all_alphas = []\n elif instruction.type == 'NHST':\n #New host tring to connect to network\n new_host_ip = instruction.message.origin\n payload_array = instruction.message.payload.split(':')\n\n #check if the new host is a neighbor\n if self.x_max == self.curr_x_max:\n self.r_neighbor = new_host_ip\n if self.x_min == self.curr_x_min:\n self.l_neighbor = new_host_ip\n self.host_ips.append(new_host_ip)\n #Start the thread that is listening to the socket connected to the new host\n new_thread = Thread(target=lambda: self.listenToHost(instruction.sock))\n new_thread.daemon = True\n new_thread.start()\n new_connection = Connection(new_host_ip, instruction.sock, new_thread)\n self.connections.append(new_connection)\n host_area = str(self.x_min) + ':' + str(self.x_max)\n #send current host area to the newly connected host\n area_message = Message('AREA', self.ip, host_area)\n instruction.sock.sendall(area_message.generateByteMessage())\n print('Sent AREA message to ' + new_host_ip)\n elif instruction.type == 'LHST':\n #Host has disconnected to the network\n for host_ip in self.host_ips:\n if host_ip == instruction.message.origin:\n #remove host from list of connected ips\n self.host_ips.remove(host_ip)\n for connection in self.connections:\n #remove the connection object from list of known connections\n if connection.ip == instruction.message.origin:\n #close the hosts socket and thread\n connection.close()\n self.connections.remove(connection)\n else:\n print('Invalid Instruction - skipping...')\n return",
"def threadloop(self): # , finish=False):\n while True:\n args = self.queue.get()\n if args is STOP:\n self.queue.put(STOP)\n self.queue.task_done()\n break\n try:\n args[0](*args[1], **args[2])\n finally:\n # clean up the queue, raise the exception.\n self.queue.task_done()\n # raise",
"def non_blocking_get(self):\n try:\n return self.q.get(block=False)\n except queue.Empty:\n time.sleep(0)\n return None",
"def on_tick(self):\n if ((len(self._queue) >= self.config.batchsize) or\n (time.time() - self._last_get > self.config.batchtime and self._queue)):\n self._get()",
"def poll_thread():\n while not stop_flag.wait(0.100): # poll every 100ms\n check_jobs()",
"def _queue_thread(self):\n while self.running:\n try:\n msg = self.q.get(True, max(self.blocktime / 1000, 1))\n self.busy = True\n self.send(msg)\n self.update()\n except Empty:\n self.busy = False\n pass\n\n # Prune the events list of dead events\n self.events_lock.acquire()\n self.events = filter(lambda t: t.is_alive(), self.events)\n self.events_lock.release()",
"async def status_update_loop(self):\n self.status_message_update_waiter = sleep(UPDATE_INTERVAL, KOKORO)\n \n while self.state == CHANNEL_MOVE_STATE_NONE:\n set_value = await self.status_message_update_waiter\n # sleep sets by `None`\n if set_value is not None:\n break\n \n self.status_message_update_waiter = sleep(UPDATE_INTERVAL, KOKORO)\n await self.update_status_message()\n continue\n \n await self.update_status_message()\n await self.send_done_notification()\n return",
"def run(self):\n while True:\n next_task = self.task_queue.get()\n if next_task is None:\n # Poison pill means shutdown\n self.task_queue.task_done()\n break\n answer = next_task(self.data)\n self.task_queue.task_done()\n self.result_queue.put(answer)",
"def trigger_update(self):\n update_thread = Thread(target=self.process_queued_msg)\n update_thread.setDaemon(True)\n update_thread.start()",
"def getNextUpdate(self):\n\n return self.get_POW().getNextUpdate()"
] | [
"0.75623107",
"0.61685276",
"0.5938287",
"0.591084",
"0.58862364",
"0.5823619",
"0.5713644",
"0.56726706",
"0.5642151",
"0.5579003",
"0.55334973",
"0.5469232",
"0.54662484",
"0.5425067",
"0.54067975",
"0.54012185",
"0.53892756",
"0.5383263",
"0.537932",
"0.53742594",
"0.5365099",
"0.53409696",
"0.53385437",
"0.5331959",
"0.53242314",
"0.53223526",
"0.53196365",
"0.53189534",
"0.5313846",
"0.53132695"
] | 0.778689 | 0 |
Get the next update from the queue. If no update is found, block the process until one is received. If a stop signal is sent, try to gracefully stop the thread. | def receive_next_update(self) -> telegram.Update:
# Pop data from the queue
data = ""
try:
data = self.queue.get(timeout=self.cfg.telegram["conversation_timeout"])
except queuem.Empty:
# If the conversation times out, gracefully stop the thread
self.__graceful_stop(StopSignal("timeout"))
# Check if the data is a stop signal instance
if isinstance(data, StopSignal):
# Gracefully stop the process
log.debug("Waiting for a specific message...")
self.__graceful_stop(data)
# Return the received update
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __receive_next_update(self) -> telegram.Update:\n # Pop data from the queue\n try:\n data = self.queue.get(timeout=self.cfg.telegram[\"conversation_timeout\"])\n except queuem.Empty:\n # If the conversation times out, gracefully stop the thread\n self.__graceful_stop(StopSignal(\"timeout\"))\n # Check if the data is a stop signal instance\n if isinstance(data, StopSignal):\n # Gracefully stop the process\n log.debug(\"Waiting for a specific message...\")\n self.__graceful_stop(data)\n # Return the received update\n return data",
"def next_ele(self):\n\t\ttry:\n\t\t\tret = self._queue.get(block = True, timeout=0.5)\n\t\t\tself._queue.task_done()\n\t\t\treturn ret\n\t\texcept queue.Empty:\n\t\t\tif not self.is_running():\n\t\t\t\traise\n\t\t\telse:\n\t\t\t\treturn None",
"def get_updates(self):\n if update_queue:\n return update_queue.pop()",
"def _wait_queue(self):\n while True:\n time.sleep(0.1)\n if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():\n return",
"def next_ele(self):\n\t\ttry:\n\t\t\tret = self._queue.get(block=True, timeout=0.5)\n\t\t\tself._queue.task_done()\n\t\t\treturn ret\n\t\texcept queue.Empty:\n\t\t\tif not self.isAlive():\n\t\t\t\traise\n\t\t\telse:\n\t\t\t\treturn None",
"def pop_one(self):\n with self.lock:\n if self.please_stop:\n return [THREAD_STOP]\n elif not self.queue:\n return None\n else:\n v =self.queue.pop()\n if v is THREAD_STOP: # SENDING A STOP INTO THE QUEUE IS ALSO AN OPTION\n self.please_stop.go()\n return v",
"def get(self):\n with self.__lock:\n while True:\n try:\n job = self.__queue.get(False)\n self.__lock.notify_all()\n return job\n except Queue.Empty:\n self.__lock.wait()",
"def next(self):\n while True: # waiting\n item = self.get_next_if_any()\n if item is not None: # feature: value None is filtered out\n return item\n\n if self.nomore: # if nothing else is coming\n break # stop waiting\n\n time.sleep(0.1) # wait before checking again\n\n raise StopIteration() # tell next worker nothing else is coming",
"def next(self):\n try:\n return self.queue.get()\n except Empty:\n raise StopIteration",
"def run(self):\n while True:\n # Check to see if we should stop\n if self._stop.isSet():\n logger.debug(\"Worker thread stopping.\")\n break\n\n # Try to pull from the queue\n try:\n func, args, kwargs = self.queue.get_nowait()\n func(*args, **kwargs)\n except Queue.Empty:\n time.sleep(5)\n continue\n except Exception as e:\n logger.exception(e)",
"def get(self):\n while self.is_running():\n try:\n inputs = self.queue.get(block=True, timeout=5).get()\n if self.is_running():\n self.queue.task_done()\n if inputs is not None:\n yield inputs\n except queue.Empty:\n pass\n except Exception as e: # pylint: disable=broad-except\n self.stop()\n raise e",
"def run(self) -> None:\n\n while True:\n try:\n input_element = self.input_queue.get_nowait()\n self.process(input_element)\n except Empty:\n return",
"def get_next_cell(self):\r\n if len(self.queue) == 0:\r\n return\r\n val = self.peek_queue()\r\n self.queue.remove(val)\r\n return val[0]",
"async def get_next(self) -> Probe:\n schedule: Optional[Schedule] = None\n while schedule is None:\n try:\n # Try to get the earliest scheduled probe\n schedule = self.queue[0]\n except IndexError:\n # If there is none, wait for a change\n async with self.queue_changed:\n await self.queue_changed.wait()\n else:\n # Wait until it's time to run the scheduled probe\n with trio.move_on_at(schedule.next_time):\n # However, if the queue changes before it's time to run,\n # we forget the selected schedule to re-elect a new one.\n async with self.queue_changed:\n await self.queue_changed.wait()\n schedule = None\n # Just before running it, check if it's not actually removed\n if schedule is not None and schedule.removed:\n heapq.heappop(self.queue)\n schedule = None\n # Immediately reschedule the next run of the selected probe\n schedule.advance()\n heapq.heapreplace(self.queue, schedule)\n # Then let the caller actually run the elected probe\n return schedule.probe",
"def my_consumer(q):\n while True:\n data = q.get()\n print('data found to be processed: {}'.format(data))\n processed = data * 2\n print(processed)\n\n if data is sentinel:\n break",
"def q_get(self):\n while not self.stopped():\n try:\n return self.in_q.get(timeout=self.heart_beat)\n except queue.Empty:\n pass",
"def run(self):\n while True:\n next_task = self.task_queue.get()\n if next_task is None:\n # Poison pill means shutdown\n self.task_queue.task_done()\n break\n # Fetch answer from task\n answer = next_task()\n self.task_queue.task_done()\n # Put into result queue\n self.result_queue.put(answer)\n return",
"def __update(self):\n player = MediaPlayer(self.video_path) if self.play_audio else None\n # keep looping infinitely\n while True:\n # if the thread indicator variable is set, stop the\n # thread\n if self.stopped:\n break\n\n # otherwise, ensure the queue has room in it\n if not self.Q.full():\n # read the next frame from the file\n (grabbed, frame) = self.stream.read()\n\n # if the `grabbed` boolean is `False`, then we have\n # reached the end of the video file\n if not grabbed:\n self.stopped = True\n\n # if there are transforms to be done, might as well\n # do them on producer thread before handing back to\n # consumer thread. ie. Usually the producer is so far\n # ahead of consumer that we have time to spare.\n #\n # Python is not parallel but the transform operations\n # are usually OpenCV native so release the GIL.\n #\n # Really just trying to avoid spinning up additional\n # native threads and overheads of additional\n # producer/consumer queues since this one was generally\n # idle grabbing frames.\n if self.transform:\n frame = self.transform(frame)\n\n # add the frame to the queue\n self.Q.put(frame)\n else:\n time.sleep(0.1) # Rest for 10ms, we have a full queue\n if player is not None:\n player.close_player()\n self.stream.release()",
"def run(self):\n while self._update_func():\n self.update_signal.emit(None)",
"def run(self):\n while True: # make sure to run at least once before exiting\n with self._lock:\n self._update(self._data)\n if self._done:\n break\n time.sleep(1)",
"def processWork(self):\n while self.running == True:\n if len(self.work_queue) == 0:\n self.work_queue = [Instruction('Do Math'), Instruction('Send HUPD'), Instruction('Receive All HUPDs')]\n else:\n instruction = self.work_queue.pop(0)\n if instruction.type == 'Do Math':\n #start calculations\n self.updated = False\n #print('Doing Math')\n # run calculations\n elif instruction.type == 'Send HUPD':\n #echo host update to all other hosts on the network\n min_max = str(self.x_min) + ':' + str(self.x_max)\n payload = 'a' + '\\0' + 'b' + '\\0' + 'c' + '\\0' + 'd' + '\\0' + 'e' + '\\0' + 'f' + '\\0' + 'g' + '\\0' + min_max + '\\0'\n our_update = Message(\"HUPD\", self.ip, payload)\n #if there are no connections, send to myself\n for connection in self.connections:\n connection.host_sock.sendall(our_update.generateByteMessage())\n elif instruction.type == 'Receive All HUPDs':\n # make sure to receive all HUPDs from listening threads\n if len(self.connections) > 0:\n while len(self.updates_received) != len(self.connections):\n msg = 'wait'\n # only set to true once all updates have been received\n self.updated = True\n self.updates_received = []\n # Once all updates are recieved update ABoid locations\n self.all_alphas = []\n elif instruction.type == 'NHST':\n #New host tring to connect to network\n new_host_ip = instruction.message.origin\n payload_array = instruction.message.payload.split(':')\n\n #check if the new host is a neighbor\n if self.x_max == self.curr_x_max:\n self.r_neighbor = new_host_ip\n if self.x_min == self.curr_x_min:\n self.l_neighbor = new_host_ip\n self.host_ips.append(new_host_ip)\n #Start the thread that is listening to the socket connected to the new host\n new_thread = Thread(target=lambda: self.listenToHost(instruction.sock))\n new_thread.daemon = True\n new_thread.start()\n new_connection = Connection(new_host_ip, instruction.sock, new_thread)\n self.connections.append(new_connection)\n host_area = str(self.x_min) + ':' + str(self.x_max)\n #send current host area to the newly connected host\n area_message = Message('AREA', self.ip, host_area)\n instruction.sock.sendall(area_message.generateByteMessage())\n print('Sent AREA message to ' + new_host_ip)\n elif instruction.type == 'LHST':\n #Host has disconnected to the network\n for host_ip in self.host_ips:\n if host_ip == instruction.message.origin:\n #remove host from list of connected ips\n self.host_ips.remove(host_ip)\n for connection in self.connections:\n #remove the connection object from list of known connections\n if connection.ip == instruction.message.origin:\n #close the hosts socket and thread\n connection.close()\n self.connections.remove(connection)\n else:\n print('Invalid Instruction - skipping...')\n return",
"def threadloop(self): # , finish=False):\n while True:\n args = self.queue.get()\n if args is STOP:\n self.queue.put(STOP)\n self.queue.task_done()\n break\n try:\n args[0](*args[1], **args[2])\n finally:\n # clean up the queue, raise the exception.\n self.queue.task_done()\n # raise",
"def non_blocking_get(self):\n try:\n return self.q.get(block=False)\n except queue.Empty:\n time.sleep(0)\n return None",
"def on_tick(self):\n if ((len(self._queue) >= self.config.batchsize) or\n (time.time() - self._last_get > self.config.batchtime and self._queue)):\n self._get()",
"def poll_thread():\n while not stop_flag.wait(0.100): # poll every 100ms\n check_jobs()",
"def _queue_thread(self):\n while self.running:\n try:\n msg = self.q.get(True, max(self.blocktime / 1000, 1))\n self.busy = True\n self.send(msg)\n self.update()\n except Empty:\n self.busy = False\n pass\n\n # Prune the events list of dead events\n self.events_lock.acquire()\n self.events = filter(lambda t: t.is_alive(), self.events)\n self.events_lock.release()",
"async def status_update_loop(self):\n self.status_message_update_waiter = sleep(UPDATE_INTERVAL, KOKORO)\n \n while self.state == CHANNEL_MOVE_STATE_NONE:\n set_value = await self.status_message_update_waiter\n # sleep sets by `None`\n if set_value is not None:\n break\n \n self.status_message_update_waiter = sleep(UPDATE_INTERVAL, KOKORO)\n await self.update_status_message()\n continue\n \n await self.update_status_message()\n await self.send_done_notification()\n return",
"def run(self):\n while True:\n next_task = self.task_queue.get()\n if next_task is None:\n # Poison pill means shutdown\n self.task_queue.task_done()\n break\n answer = next_task(self.data)\n self.task_queue.task_done()\n self.result_queue.put(answer)",
"def trigger_update(self):\n update_thread = Thread(target=self.process_queued_msg)\n update_thread.setDaemon(True)\n update_thread.start()",
"def getNextUpdate(self):\n\n return self.get_POW().getNextUpdate()"
] | [
"0.77863574",
"0.61682695",
"0.59378856",
"0.5911545",
"0.5886102",
"0.5824382",
"0.5714152",
"0.5673009",
"0.56416255",
"0.5580509",
"0.5534133",
"0.54715234",
"0.5465117",
"0.54249656",
"0.5407767",
"0.5402611",
"0.53896356",
"0.53829896",
"0.5379179",
"0.5373928",
"0.5366659",
"0.5341444",
"0.5341224",
"0.5332694",
"0.5324122",
"0.53239334",
"0.5319495",
"0.5319427",
"0.53138876",
"0.53110933"
] | 0.7561312 | 1 |
Continue getting updates until one of the strings contained in the list is received as a message. | def __wait_for_specific_message(self,
items: List[str],
cancellable: bool = False) -> Union[str, CancelSignal]:
log.debug("Waiting for a specific message...")
while True:
# Get the next update
update = self.__receive_next_update()
log.debug(f"get command {update.message.text}")
log.debug(f"get command list {items}")
# If a CancelSignal is received...
if isinstance(update, CancelSignal):
# And the wait is cancellable...
if cancellable:
# Return the CancelSignal
return update
else:
# Ignore the signal
continue
# Ensure the update contains a message
if update.message is None:
continue
# Ensure the message contains text
if update.message.text is None:
continue
# Check if the message is contained in the list
if update.message.text not in items:
continue
# Return the message text
return update.message.text | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def handl_update(updates):\n for update in updates[\"result\"]:\n text = update[\"message\"][\"text\"]\n chat = update[\"message\"][\"chat\"][\"id\"]\n items = db.get_item(chat)\n if text == \"/done\":\n keyboard = build_keyboard(items)\n send_message(\"Sélectionner un items pour le suprimer\", chat, keyboard)\n elif text == \"/start\":\n send_message(\"Bienvenue dans votre liste de tâches personnelles. \"\n \"Envoyez-moi n'importe quel texte et je le stockerai comme un article.\"\n \" Envoyé (/) terminé pour supprimer des éléments\", chat)\n elif text.startswith(\"/\"):\n continue\n elif text in items:\n db.delete_item(text, chat)\n items = db.get_item(chat)\n keyboard = build_keyboard(items)\n send_message(\"Sélectionner un items pour le suprimer \", chat, keyboard)\n else:\n db.add_item(text, chat)\n items = db.get_item(chat)\n # print(\"items \", items)\n message = \"\\n\".join(items)\n # print(\"message\", message)\n send_message(message, chat)",
"def wait_for_specific_message(self,\n items: List[str],\n cancellable: bool = False) -> Union[str, CancelSignal]:\n log.debug(\"Waiting for a specific message...\")\n while True:\n # Get the next update\n update = self.__receive_next_update()\n try:\n log.debug(f\"get command {update.message.text}\")\n log.debug(f\"get command list {items}\")\n except:\n log.debug(f\"update.message.text is None\")\n # If a CancelSignal is received...\n if isinstance(update, CancelSignal):\n # And the wait is cancellable...\n if cancellable:\n # Return the CancelSignal\n return update\n else:\n # Ignore the signal\n continue\n # Ensure the update contains a message\n if update.message is None:\n # @todo подумать как отделить команды из основного меню и команды из второго меню\n if not update.callback_query is None:\n log.debug(f\"get second command {update.callback_query.data}\")\n return update\n continue\n # Ensure the message contains text\n if update.message.text is None:\n continue\n # Check if the message is contained in the list\n if update.message.text not in items:\n continue\n # Return the message text\n return update.message.text",
"def send_waiting_messages(wlist, messages_to_send):\n for message in messages_to_send:\n if wlist != []:\n wlist[0].send(message)\n messages_to_send.remove(message)",
"def update_messages():\n\n scrollbar = Scrollbar(root)\n scrollbar.pack(side=RIGHT, fill=Y)\n listbox = Text(root, wrap =WORD, yscrollcommand=scrollbar.set, background=\"#CCFFCC\", fg=\"black\", selectbackground=\"#003300\",\n highlightcolor=\"#0033CC\")\n\n msgs = []\n run = True\n while run:\n\n time.sleep(0.1) # update every 1/10 of a second\n new_messages = c1.get_messages() # get any new messages from client\n msgs.extend(new_messages) # add to local list of messages\n\n for msg in new_messages: # display new messages\n print(msg)\n #title_label = Label(text=str(msg), bg=\"#CCFFCC\", fg=\"black\", padx=34, pady=5, font=\"comicsansms 9 bold\",borderwidth=3,wraplength=300, relief=SUNKEN)\n #title_label.pack(side=TOP)\n\n listbox.insert(END, str(msg)+'\\n\\n')\n listbox.pack(fill=BOTH, padx=36)\n scrollbar.config(command=listbox.yview)\n\n if msg == \"{quit}\":\n root.destroy()\n run = False\n break",
"async def _process_push_updates(self):\n while True:\n # print(\"Establishing SSE connection...\")\n try:\n sse_connection_id, sse_client = self.__service_provider.connect_sse(\n WorklistUpdateManagerApi\n )\n while True:\n # print(\"SSE connection established, registering for worklist push...\")\n callback_data = ClientWorklistSseCallbackData(\n sse_conn=sse_connection_id,\n sub_class=\"ClientWorklistSseCallbackData\",\n id=self.__worklist.worklist_id,\n client_worklist_id=self.__worklist.client_worklist_id,\n agent=self.__worklist.agent,\n revision=self.__worklist.revision,\n wu_conf=self.__worklist.wu_conf,\n )\n wum: WorklistUpdateManagerApi = self.__service_provider.get_service(\n WorklistUpdateManagerApi\n )\n wum.register_client_worklist_sse(callback_data)\n # print(\"Worklist registered for SSE push\")\n self.__push_sse_client = sse_client\n for event in sse_client:\n if event.event == \"SseConnectionEstablished\":\n # print('SSE session was re-established, re-registering..')\n callback_data.sse_conn = event.data\n callback_data.revision = (self.__worklist.revision,)\n wum.register_client_worklist_sse(callback_data)\n # print(\"Worklist registered again for SSE push\")\n elif event.event == \"client-worklist-update\":\n # print(\"Worklist update received\")\n try:\n update_dict = json.loads(event.data)\n update: WorklistUpdate = self.__service_provider.deserialize(\n update_dict, WorklistUpdate\n )\n self.__apply_worklist_updates(\n update.source_revision,\n update.target_revision,\n update.item_updates,\n )\n # call the listeners\n self._notify_worklist_update_listeners(update.item_updates)\n except Exception as e:\n print(\"Couldn't deserialize and apply update: \", event, e)\n else:\n print(f\"Unknown worklist SSE push event {event.event} received\")\n except ConnectionError:\n # re-establish connection after some wait time\n # print(\"SSE disconnected...\")\n await sleep(self.__af_conf.sse_connect_retry_wait)\n except Exception as e:\n print(\"Unknown exception caught during SSE handling\", e.__class__)\n traceback.print_exc()\n raise\n finally:\n self.__push_sse_client = None",
"def receiveContactList(self, contactList):",
"def receive(self, command_list):\n for cmd in command_list:\n self._handle_command(cmd)",
"def receive(self, command_list):\n for cmd in command_list:\n self._send_cmd_with_mapped_ids(cmd)",
"def note_update(self, upd_note_handle_list):\n for handle in upd_note_handle_list :\n if handle in self.data:\n self.rebuild()\n break",
"def handle_update_string(self, update_string):\n self.retrieve_taglist()\n\n self.taglist.populate_update_list(update_string)\n\n validate_updates(self.taglist)",
"def callback_crypto_currency_listing(message):\n body = json.loads(message.body.decode('utf-8'))\n data_id = TYPE_LISTING\n\n if not self.waiters_first_msg.get(data_id):\n return\n\n while self.waiters_first_msg[data_id]:\n observer = self.waiters_first_msg[data_id].pop()\n asyncio.get_event_loop().create_task(observer.update(\n dict(\n data_id=data_id,\n data=body\n )\n ))",
"def process_serverlist(self, serverlist):\n\t\t# Note that events may be late.\n\t\t# However, mustn't work on widgets that are being\n\t\t# garbage collected.\n\t\tif not self.lobby_visible:\n\t\t\treturn\n\n\t\tnum_servers = 0\n\t\tfor key, val in serverlist.iteritems():\n\t\t\t# Either update an existing list item.\n\t\t\tif len(self.li_servers.items) > 0 and num_servers < len(self.li_servers.items):\n\t\t\t\tself.li_servers.items[num_servers].set_server(val)\n\t\t\t# Or create a new one.\n\t\t\telse:\n\t\t\t\tself.li_servers.items.append(LobbyListItem(val))\n\t\t\tnum_servers += 1",
"def _hear_message_from_server(self):\n while self.is_alive:\n data = self._socket.recv(1024)\n content = loads(data)\n self._current_list = content\n print(\"Servidor: {}\".format(content))",
"def _keep_getting_new_messages(self):\n while True:\n new_messages = self.get_new_messages()\n for message in new_messages:\n self.handle(message)\n time.sleep(self.refresh_delay)",
"def do_send_list( self, a_list ):\r\n # --- this needs to be moved to task some set up here then on there\r\n self.logger.info( \"turn on sendList\" )\r\n self.send_list_ix = 0\r\n\r\n #self.send_list = [ 180, 920, 160, 1740, 160, 780, 160, 2840, 160, 1320, 160, 1340, 160, ] # 1180, 160, 2700, 160, 12780, 200, 920, \\\r\n #160, 2680, 160, 780, 160, 800, 160, 780, 160, 920, 160, 800, 140, 800, \\\r\n # 160 ]\r\n self.send_list = a_list\r\n self.com_driver.send( \"z\\n\" )\r\n self.list_send = True # if we were mult-threaded this would have to be here\r\n\r\n return",
"def user_list_update(self):\n\t\tclient_log.debug(f'Запрос списка известных пользователей {self.username}')\n\t\treq = {\n\t\t\tACTION: USERS_REQUEST,\n\t\t\tTIME: time.time(),\n\t\t\tACCOUNT_NAME: self.username\n\t\t}\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tans = get_message(self.transport)\n\t\tif RESPONSE in ans and ans[RESPONSE] == 202:\n\t\t\tself.database.add_users(ans[LIST_INFO])\n\t\telse:\n\t\t\tclient_log.error('Не удалось обновить список известных пользователей.')",
"def _workout_messages(self, msgs_bunch):\n if msgs_bunch != []:\n while True:\n r = requests.post(self.url, headers = self.headers, data = json.dumps(msgs_bunch))\n # request success condition below - to end the handler\n if r.status_code == 200:\n break\n print('http_handler: failed to retranslate messages, try again in ' + str(self.timeout) + ' sec')\n time.sleep(self.timeout)\n # next bunch of messages will not be read until this function ends\n # current bunch of messags will be deleted in next request if delete_flag = True is set",
"def publish_list(self, messages: list) -> None:",
"def publish_list(self, messages: list) -> None:",
"def on_messages(self, msg_list):\n self.stats.on_pack_recv(len(msg_list))\n\n for msg in msg_list:\n if self.state == OPEN:\n self.conn.on_message(msg)",
"def send_message_list(message_lst: list, reciever: str, receiver_data: dict,\n users_to_remove: list) -> None:\n new_prev_mes: list = []\n final_message = ''\n for message in message_lst:\n if len(new_prev_mes) == 5:\n break\n if message not in receiver_data['usr_prevs_mes']:\n receiver_data['mes_limit'] -= 1\n final_message += f'\\n{message}'\n new_prev_mes.append(message)\n receiver_data['usr_prevs_mes'] = new_prev_mes\n final_message += '\\nReply stop to stop these notifications.'\n if len(new_prev_mes) != 0:\n send_message(reciever,\n 'New Vaccine Locations Detected!',\n final_message,\n receiver_data['carrier'])\n if receiver_data['mes_limit'] <= 0:\n users_to_remove.append(reciever)",
"async def confirm_names(name_list, message):\n name_msg = \"\"\n namecount = 1\n char_name_dict[message.author.id] = {}\n\n for name in name_list:\n clean_name = re.sub('(\\[|\\().*(\\]|\\))', '', name).strip()\n char_name_dict[message.author.id][namecount] = clean_name\n name_msg += f\"{namecount}) {clean_name}\\n\"\n namecount += 1\n\n await message.channel.send(f\"These were the detected character names, please verify that they are correct.\\n\\n\"\n \"If any of the names need to be corrected, use the edit command with the number associated with the name.\\n\"\n \"For example: `edit 1 MrCorrectName` would be the command to modify the first name to 'MrCorrectName'\\n\\n\"\n \"If you would like to remove a character from the list, use the `delete` command.\\n\"\n \"For example: `delete 2` would delete the second character from the list before submitting.\\n\\n\"\n \"Please be aware that both your screenshot and your provided names will be stored for future reference.\\n\\n\"\n f\"```\\n{name_msg}\\n```\\n\"\n f\"If all listed names are correct please type `confirm`, otherwise please use the `edit` command.\")\n\n message_state[message.author.id] = 'VALIDATING'",
"async def do_modlist():\n\n download = urllib.request.urlopen(server_api2)\n data = json.loads(download.read())\n mod_list = []\n string1 = ''\n string2 = ''\n for i in data['modinfo']['modList']:\n mod_list.append(i['modid'])\n for i in range(int(len(mod_list) / 2)):\n string1 += f'{mod_list[i]}, '\n for i in range(int(len(mod_list) / 2), len(mod_list)):\n string2 += f'{mod_list[i]}, '\n await bot.send_message(c, string1)\n await bot.send_message(c, string2)",
"def receive(self, command_list):\n for cmd in command_list:\n if isinstance(cmd.gate, FlushGate):\n while self._stored_commands:\n self._run()\n self.send([cmd])\n else:\n self._stored_commands.append(cmd)\n # Storage is full: Create new map and send some gates away:\n if len(self._stored_commands) >= self.storage:\n self._run()",
"def __track_updates(self):\n\n self.__bus_stations_Socket.bind((self.__ipv4, BusController.STATIONS_PORT))\n self.__bus_stations_Socket.listen(1)\n while not self.__stop_threads:\n # establish a connection\n try:\n message = \"empty message\"\n client_socket, addr = self.__bus_stations_Socket.accept()\n data = client_socket.recv(1024).decode()\n print(f\"recieved {data}\")\n if \"message\" in data:\n line_num, station_num, id, keyword = data.split(\":\")[0].split(\" \")\n message = data.split(\":\")[1]\n else:\n line_num, station_num, id = data.split(\" \")\n\n if not (line_num.isdigit() and station_num.isdigit() and id.isdigit()):\n print(\"some bus tried to access the system, but his ID doesn't match the expected\")\n elif int(line_num) not in self.__bus_dict.keys() or id not in map(lambda bus: bus.id,\n self.__bus_dict[int(line_num)]):\n print(\"an unregistered bus tried to access the system, ignored.\")\n elif \"message\" in data:\n\n self.__bus_messages.append({\n \"sender\": self.__find_bus_by_id(id),\n \"time\": time.time(),\n \"text\": message\n })\n if len(self.__bus_messages) > BusController.MAX_MESSAGES_TO_DISPLAY:\n self.__bus_messages = self.__bus_messages[:-BusController.MAX_MESSAGES_TO_DISPLAY:]\n\n print(f\"recieved a message, added to the list: {self.__bus_messages}\")\n else:\n\n relevant_bus = self.__find_bus_by_id(id)\n relevant_bus.set_station(station_num)\n self.__telegram_bot.notify_passengers_about_incoming_bus(relevant_bus)\n self.__try_remove_people_from_the_station(bus=relevant_bus)\n if int(station_num) >= BusController.MAX_STATION:\n self.remove_bus(relevant_bus)\n self.__message_sender.send_line(int(line_num), update_buses=True)\n\n except Exception as e:\n print(f\"exception in __track_updates: {e}\")\n print(\"closed track_updates\")",
"def recv(self, *messages):\n for message in messages:\n self.input.put(message)",
"async def status_update_loop(self):\n self.status_message_update_waiter = sleep(UPDATE_INTERVAL, KOKORO)\n \n while self.state == CHANNEL_MOVE_STATE_NONE:\n set_value = await self.status_message_update_waiter\n # sleep sets by `None`\n if set_value is not None:\n break\n \n self.status_message_update_waiter = sleep(UPDATE_INTERVAL, KOKORO)\n await self.update_status_message()\n continue\n \n await self.update_status_message()\n await self.send_done_notification()\n return",
"def updateFileList(self, fileList):\n\n if fileList == self.fileList:\n return 0\n\n self.mutex.acquire()\n # init = time.time()\n # \n # while(self.bussy):\n # sleep(0.1)\n # if time.time() - init > 2*self.period:\n # return 0\n \n self.fileList = fileList\n self.mutex.release()\n return 1",
"async def update_user_list(data):\n user_list = data[60:]\n return {'type': 'update_user_list', 'data': {'user_list': user_list}}",
"def messages_update(self, ht):\n for i in ht.extract_messages().iteritems():\n self.temporary(i)"
] | [
"0.63190633",
"0.6146581",
"0.6050368",
"0.59897554",
"0.5915279",
"0.58745605",
"0.57445425",
"0.5734721",
"0.5697445",
"0.5672764",
"0.5622075",
"0.55866826",
"0.55780554",
"0.55602586",
"0.5549589",
"0.55215335",
"0.5500802",
"0.5496615",
"0.5496615",
"0.54910165",
"0.5484858",
"0.54792464",
"0.5471817",
"0.54657745",
"0.5460099",
"0.54558283",
"0.5455678",
"0.5453439",
"0.5446856",
"0.5443638"
] | 0.61539835 | 1 |
Continue getting updates until the regex finds a match in a message, then return the first capture group. | def __wait_for_regex(self, regex: str, cancellable: bool = False) -> Union[str, CancelSignal]:
log.debug("Waiting for a regex...")
while True:
# Get the next update
update = self.__receive_next_update()
# If a CancelSignal is received...
if isinstance(update, CancelSignal):
# And the wait is cancellable...
if cancellable:
# Return the CancelSignal
return update
else:
# Ignore the signal
continue
# Ensure the update contains a message
if update.message is None:
continue
# Ensure the message contains text
if update.message.text is None:
continue
# Try to match the regex with the received message
match = re.search(regex, update.message.text)
# Ensure there is a match
if match is None:
continue
# Return the first capture group
return match.group(1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getMatch(reMatch,group=0):\n if reMatch: return reMatch.group(group)\n else: return ''",
"def recvregex(self, regex):\n if isinstance(regex, str):\n regex = re.compile(regex)\n buf = ''\n match = None\n\n while not match:\n buf += d(self.recv(1))\n match = regex.search(buf)\n\n return match",
"def _consume(self, pattern):\n if self.is_finished:\n raise StopIteration()\n found = re.match(pattern, self.text[self.pos:])\n if found is None:\n return None\n self.pos += found.end()\n return found.group()",
"def message_matches(cls, msg, regex):\n m = regex.match(msg.text)\n if m:\n return m.groups()\n return None",
"def capture_group(text: AnsibleUnsafeText, re_pattern: str, group=0) -> str:\n match_group = ''\n try:\n match_group = re.search(re_pattern, str(text)).group(group)\n except AttributeError as ae:\n print(type(ae).__name__)\n traceback.print_stack()\n return to_text(match_group)",
"def read_until_re(self, r, flags=0, timeout=None):\n match = self.read_cond(lambda x: re.search(r, x.buf, flags=flags), timeout)\n self.buf = self.buf[match.end():]\n return match if len(match.groups()) > 1 else match.group(len(match.groups()))",
"def read_until_regex(self, regex):\n with self.reading:\n while True:\n data = self.read_buffer.slice()\n match = regex.search(data)\n if match:\n break\n self.read_buffer.enqueue((yield self.base.read(self.bufsize)))\n do_return((self.read_buffer.dequeue(match.end()), match))",
"async def readuntil_re(self, regex, start=0):\n self.logger.debug(\"readuntil_re: %s\", regex)\n\n try:\n match = await self.wait_for(lambda data: regex.search(data, start))\n\n m_beg, m_end = match.span()\n # We are matching against the data stored stored in bytebuffer\n # The bytebuffer is manipulated in place. After we read the data\n # the buffer may get overwritten. The match object seems to be\n # directly referring the data in bytebuffer. This causes a problem\n # when we try to find the matched groups in match object.\n #\n # In [38]: data = bytearray(b\"localhost login:\")\n #\n # In [39]: rex = re.compile(b'(?P<login>.*((?<!Last ).ogin|.sername):)|(?P<passwd>\\n.*assword:)|(?P<prompt>\\n.*[%#>])|(?P<ignore>( to cli \\\\])|(who is on this device.\\\\]\\r\\n)|(Press R\n # ...: ETURN to get started\\r\\n))\\\\s*$')\n #\n # In [40]: m = rex.search(data)\n #\n # In [41]: m.groupdict()\n # Out[41]: {'ignore': None, 'login': b'localhost login:', 'passwd': None, 'prompt': None}\n #\n # In [42]: data[:]=b'overwrite'\n #\n # In [43]: m.groupdict()\n # Out[43]: {'ignore': None, 'login': b'overwrite', 'passwd': None, 'prompt': None}\n #\n groupdict = match.groupdict()\n rdata = await self.read(m_end)\n data = rdata[:m_beg] # Data before the regex match\n matched = rdata[m_beg:m_end] # portion that matched regex\n except AssertionError:\n if self._eof:\n # We are at the EOF. Read the whole buffer and send it back\n data = await self.read(len(self._buffer))\n matched = b\"\"\n match = None\n groupdict = None\n else:\n # re-raise the exception\n raise\n\n return ResponseMatch(data, matched, groupdict, match)",
"def find_matches_to_message(\n self, message: str\n ) -> Tuple[Optional[str], Optional[Module]]:\n processed_message = message.lower()\n for _, module in self.modules.get_modules():\n if not module.is_loaded:\n continue\n for func_name, reg_list in module.module_settings.templates.items():\n for reg in reg_list:\n find_match = re.findall(reg, processed_message)\n if find_match:\n return (func_name, module)\n\n return (None, None)",
"def one(self, regex: str) -> Optional[str]:\n for f in self.find(regex):\n return f",
"def parse_message(self, it, line):\n match = self.message_line_re.match(line)\n if match is None:\n return None, next(it)\n\n file_path = os.path.normpath(\n os.path.join(os.path.dirname(self.analyzer_result),\n match.group('path')))\n\n message = Message(\n file_path,\n int(match.group('line')),\n 0,\n match.group('message').strip(),\n match.group('checker').strip())\n\n try:\n return message, next(it)\n except StopIteration:\n return message, ''",
"def _uncached_match(self, text, pos, cache):\n m = self.re.match(text, pos)\n if m is not None:\n span = m.span()\n node = RegexNode(self.name, text, pos, pos + span[1] - span[0])\n node.match = m # TODO: A terrible idea for cache size?\n return node",
"def skip_until_re(self, r, flags=0, timeout=None):\n match = self.read_cond(lambda x: re.search(r, x.buf, flags=flags), timeout)\n self.buf = self.buf[match.start():]\n return match if len(match.groups()) > 1 else match.group(len(match.groups()))",
"def process_regex(_data):\n _tmp = {}\n if _data is not None and len(_data.groups()) > 0:\n for _key in (\"head\", \"func\", \"file\", \"line\", \"tail\"):\n try:\n _val = _data.group(_key)\n if _val:\n _tmp[_key] = _val\n except Exception:\n pass\n return _tmp if _tmp else None",
"def match(self, text):\n parsed = self.regex.fullmatch(text)\n if parsed is None:\n return text, None\n group_dict = parsed.groupdict()\n return group_dict[\"description\"], group_dict[\"key\"]",
"def RunRegex(regex, string):\n m = regex.search(string)\n if m:\n return m.groups()[0]\n else:\n return None",
"def group(self, group_num):\n if self.rematch is None:\n return None\n else:\n return self.rematch.group(group_num)",
"def _reg_catch(reg_ex, text, group_id=1):\n\n\tmatch = re.search(reg_ex, text)\n\tif match:\n\t\treturn match.group(group_id)\n\telse:\n\t\treturn None",
"def get_match(field, line):\n match = re.search(field, line)\n if match:\n return match.group(1)",
"def regexp(self, pattern):\r\n match = pattern.match(self.text, self.cur)\r\n if match is not None:\r\n return match.group()",
"def find_first_regex_match(key, regex_candidates):\n for cand in regex_candidates:\n try:\n pattern = re.compile(BaseInterface.cap_match_string(cand))\n if pattern.match(key):\n return cand\n except:\n logging.warn('[ros_interface] Ignoring invalid regex string \"{0!s}\"!'.format(cand))\n\n return None",
"def fileReSeek(fh, regex):\n\n p = re.compile(regex)\n while True:\n line = fh.readline()\n if line == '':\n return None\n match = p.match(line)\n if match:\n return match",
"def FindNextEnv(self):\n if self.first:\n self.matchline = 0\n self.ind = self.matchline+1\n self.first = 0\n return self.matchline\n else:\n next_ind = self.list.findnextre(self.p, ind=self.ind)\n if next_ind is not None:\n self.matchline = next_ind\n self.ind = self.matchline+1\n return self.matchline\n else:\n return None",
"def capture_address_element(regex_object, full_address):\n full_address = normalise_address(full_address)\n capture_groups = regex_object.search(full_address)\n if capture_groups:\n return capture_groups.group(0)\n return ''",
"def _ProcessMatch(self, input_line, match_regex, line, output_stream):\n lastpos = 0\n for fullmatch in match_regex.finditer(line):\n # Add text before the match as regular text.\n if lastpos < fullmatch.start():\n starting_line = line[lastpos:fullmatch.start()]\n if self._ConsumeTextForPlugin():\n self._formatting_handler.HandleText(\n input_line,\n output_stream,\n starting_line)\n else:\n self._formatting_handler.HandleEscapedText(\n input_line,\n output_stream,\n starting_line)\n\n for rulename, match in fullmatch.groupdict().items():\n if match is not None:\n if self._ConsumeTextForPlugin() and rulename != \"PluginEnd\":\n self._formatting_handler.HandleText(\n input_line,\n output_stream,\n match)\n else:\n handler = getattr(self, u\"_Handle{0}\".format(rulename), None)\n handler(input_line, match, output_stream)\n\n lastpos = fullmatch.end()\n\n # Add remainder of the line as regular text.\n if lastpos < len(line):\n remaining_line = line[lastpos:]\n if self._ConsumeTextForPlugin():\n self._formatting_handler.HandleText(\n input_line,\n output_stream,\n remaining_line)\n else:\n self._formatting_handler.HandleEscapedText(\n input_line,\n output_stream,\n remaining_line)",
"def queue_regex(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"queue_regex\")",
"def process_match(text, pos):\n m, _ = parse_ent('<' + text + '>', pos - len(text))\n return len(text) - len(m) + 2",
"def regex_search(regex, regex_string):\n match = re.search(regex, regex_string)\n if match is not None:\n return match.group()",
"def getReOptionalGroup(matcher, str, groupInd):\n if str is not None:\n match = matcher.search(str)\n if match is not None:\n groups = match.groups()\n if len(groups) > groupInd:\n return groups[groupInd]\n return None",
"def findAllMatches(re_string, text, handler, start=0):\n regex = re.compile(re_string, re.MULTILINE | re.DOTALL | re.IGNORECASE)\n match = regex.search(text, start)\n results = []\n \n startpoint = -1\n endpoint = -1\n \n if match:\n startpoint = match.start()\n \n while match:\n start = match.end()\n results.append(handler(match))\n endpoint = match.end()\n match = regex.search(text, start)\n \n returntext = text\n if startpoint != -1 and endpoint != -1:\n returntext = text.replace(text[startpoint:endpoint], \"\")\n\n return results, returntext"
] | [
"0.629262",
"0.62774885",
"0.6136326",
"0.61299783",
"0.6032749",
"0.6029412",
"0.59464234",
"0.5920094",
"0.57432276",
"0.57361436",
"0.5642497",
"0.56279725",
"0.5584992",
"0.55632395",
"0.5546507",
"0.5532469",
"0.54971576",
"0.5486865",
"0.54025245",
"0.53982574",
"0.53766584",
"0.5375834",
"0.5346673",
"0.53405404",
"0.5338675",
"0.53128344",
"0.5312412",
"0.5299446",
"0.5297247",
"0.5267311"
] | 0.69942236 | 0 |
Continue getting updates until a PreCheckoutQuery is received. The payload is checked by the core before forwarding the message. | def __wait_for_precheckoutquery(self,
cancellable: bool = False) -> Union[telegram.PreCheckoutQuery, CancelSignal]:
log.debug("Waiting for a PreCheckoutQuery...")
while True:
# Get the next update
update = self.__receive_next_update()
# If a CancelSignal is received...
if isinstance(update, CancelSignal):
# And the wait is cancellable...
if cancellable:
# Return the CancelSignal
return update
else:
# Ignore the signal
continue
# Ensure the update contains a precheckoutquery
if update.pre_checkout_query is None:
continue
# Return the precheckoutquery
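            # Note: the Bot API expects answerPreCheckoutQuery to be sent within about 10 seconds, so the caller should reply promptly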
return update.pre_checkout_query | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def answer_pre_checkout_query(self, pre_checkout_query_id: base.String, ok: base.Boolean,\n error_message: typing.Union[base.String, None] = None) -> base.Boolean:\n payload = generate_payload(**locals())\n result = await self.request(api.Methods.ANSWER_PRE_CHECKOUT_QUERY, payload)\n\n return result",
"def _pre(self):\n if self.has_state_info(\"pre\"):\n self.get_state_info(\"pre\")",
"async def play_previous(self):\n _LOGGER.debug(\"[Foobar2k] In Previous\")\n if (self._power == POWER_ON):\n await self.prep_fetch(HTTP_POST, POST_PLAYER_PREVIOUS, data=None)\n time.sleep(0.2)\n await self.async_update()",
"def check_in(self):\n etree = self._encapsulate_request(self._generate_ping())\n self.zmq_scheduler_request_queue.put_nowait(etree)",
"def confirm_further(self, update, context):\n response_code = update.callback_query[\"data\"] # wouldyou_{yes|no}\n request_id = context.user_data[\"current_request\"]\n log.info(\"No further comments req:%s %s\", request_id, response_code)\n self.finalize_request(update, context, request_id)",
"def preQuery(self):\n self.request_url = self.url\n pass",
"def stock_processor(id, price, title, remaining, totalPackCount, preorder, start, proxy, headers):\n\n r = request_pack_stock(proxy, headers)\n packs = r['data']['searchPackListings']['data']['searchSummary']['data']['data']\n\n for pack in packs:\n item = [pack['id'], pack['title'], pack['price'], pack['remaining'], pack['totalPackCount'], pack['preorder']]\n #print(f'\\n\\nITEM:{item}\\n\\n')\n if pack['remaining'] == remaining: #change back to !=\n # Checks if it already exists in our instock\n if checker(item):\n pass\n else:\n # Add to instock dict\n INSTOCK.append(item)\n print(f'\\n\\nINSTOCK:{INSTOCK}\\n\\n')\n # Send a notification to the discord webhook with the in-stock product\n if start == 0:\n print('Sending new Notification')\n print(item)\n discord_webhook(item)\n logging.info(msg='Successfully sent Discord notification')\n\n else:\n if checker(item):\n INSTOCK.remove(item)",
"def m_req_Update(self, sender, e):\r\n if e.Instrument != None and e.Error == None:\r\n # Instrument was found\r\n print(\"Found: {0}\".format(e.Instrument.Name))\r\n # Subscribe for Inside Market Data\r\n self.m_ps = ttapi.PriceSubscription(e.Instrument, ttapi.Dispatcher.Current)\r\n self.m_ps.Settings = ttapi.PriceSubscriptionSettings(ttapi.PriceSubscriptionType.InsideMarket)\r\n self.m_ps.FieldsUpdated += self.m_ps_FieldsUpdated\r\n self.m_ps.Start()\r\n elif e.IsFinal:\r\n # Instrument was not found and TT API has given up looking for it\r\n print(\"Cannot find instrument: {0}\".format(e.Error.Message))\r\n self.Dispose()",
"def awaiting_payment(self):",
"async def process(self, msg):\n logger.debug(\"msg:\", json.dumps(msg), caller=self)\n e = msg.get(\"e\")\n if e == \"ORDER_TRADE_UPDATE\": # Order update.\n self._update_order(msg[\"o\"])",
"def acknowledge_prepayment(self):\n self.acknowledge_payment()",
"def m_req_Update(self, sender, e):\r\n if e.Instrument != None and e.Error == None:\r\n # Instrument was found\r\n print(\"Found: {0}\".format(e.Instrument.Name))\r\n # Subscribe for Inside Market Data\r\n self.m_ps = ttapi.PriceSubscription(e.Instrument, ttapi.Dispatcher.Current)\r\n self.m_ps.Settings = ttapi.PriceSubscriptionSettings(ttapi.PriceSubscriptionType.InsideMarket)\r\n self.m_ps.FieldsUpdated += self.m_ps_FieldsUpdated\r\n self.m_ps.Start()\r\n # Create a TradeSubscription to listen for order / fill events only for orders submitted through it\r\n self.m_ts = tradebk.InstrumentTradeSubscription(self.m_apiInstance.Session, ttapi.Dispatcher.Current, e.Instrument, True, True, False, False)\r\n self.m_ts.OrderUpdated += self.m_ts_OrderUpdated\r\n self.m_ts.OrderAdded += self.m_ts_OrderAdded\r\n self.m_ts.OrderDeleted += self.m_ts_OrderDeleted\r\n self.m_ts.OrderFilled += self.m_ts_OrderFilled\r\n self.m_ts.OrderRejected += self.m_ts_OrderRejected\r\n self.m_ts.Start()\r\n elif e.IsFinal:\r\n # Instrument was not found and TT API has given up looking for it\r\n print(\"Cannot find instrument: {0}\".format(e.Error.Message))\r\n self.Dispose()",
"def process_reenable_request(self, config_settings, core_state_content):\n self.logger.log(\"This is the same request as the previous patch operation. Checking previous request's status\")\n if core_state_content.__getattribute__(self.core_state_fields.completed).lower() == 'false':\n running_process_ids = self.process_handler.identify_running_processes(core_state_content.__getattribute__(self.core_state_fields.process_ids))\n if len(running_process_ids) == 0:\n self.logger.log(\"Re-triggering the patch operation as the previous patch operation was not running and hadn't marked completion either.\")\n self.utility.delete_file(self.core_state_handler.dir_path, self.core_state_handler.file)\n self.launch_new_process(config_settings, create_status_output_file=False)\n else:\n self.logger.log(\"Patch operation is in progress from the previous request. [Operation={0}]\".format(config_settings.__getattribute__(self.config_public_settings.operation)))\n exit(Constants.ExitCode.Okay)\n\n else:\n self.logger.log(\"Patch operation already completed in the previous request. [Operation={0}]\".format(config_settings.__getattribute__(self.config_public_settings.operation)))\n exit(Constants.ExitCode.Okay)",
"def handleMsgs(self):\n\n force_sheep_check = self.changed_last_step\n self.changed_last_step = False\n if not self.queue:\n return\n\n need_to_check = False\n for msg in self.popMsg(): # Receive message(s) from queue.\n if msg.type == Type.BLOCK:\n new_tx = msg.content\n if new_tx.hash in self.seen_tx:\n continue\n need_to_check = True\n self.changed_last_step = True\n self.handleNewTx(new_tx, msg.sender)\n elif msg.type == Type.REQUEST: # Requests are issued by other miners.\n target_hash = msg.content\n assert target_hash in self.seen_tx # I should never get a request for a tx I haven't seen.\n requestedTx = self.seen_tx[target_hash]\n self.sendMsg(msg.sender, Message(self.id, Type.BLOCK, requestedTx))\n if need_to_check or (self.hasSheep() and force_sheep_check): # Have to check every time if has sheep.\n self.checkAllTx()",
"def update_network_precommit(self, mech_context):\n pass",
"def process_request(request, client):\n # type: (Dict,ConnectClient) -> None\n\n external_subscription_id = Utils.get_param_value(request, 'fulfillment', 'subscription_id')\n\n api_client = Utils.get_api_client()\n resume_payload = {}\n api_client.resume_subscription(resume_payload, external_subscription_id)\n\n Utils.approve_fulfillment_request(request, client)",
"def handle_payment_intent_succeeded(self, event):\n intent = event.data.object\n pid = intent.id\n bag = intent.metadata.bag\n\n billing_details = intent.charges.data[0].billing_details\n grand_total = round(intent.charges.data[0].amount / 100, 2)\n\n order_exists = False\n attempt = 1\n while attempt <= 5:\n try:\n order = Order.objects.get(\n full_name__iexact=billing_details.name,\n email__iexact=billing_details.email,\n phone_number__iexact=billing_details.phone,\n street_address1__iexact=(\n billing_details.address.line1),\n street_address2__iexact=(\n billing_details.address.line2),\n town_or_city__iexact=billing_details.address.city,\n county__iexact=billing_details.address.state,\n country__iexact=billing_details.address.country,\n grand_total=grand_total,\n original_bag=bag,\n stripe_pid=pid,\n )\n order_exists = True\n break\n except Order.DoesNotExist:\n attempt += 1\n time.sleep(1)\n\n if order_exists:\n return HttpResponse(\n content=f'Webhook received: ({event[\"type\"]}'\n '| SUCCESS: Verified order already in database',\n status=200)\n else:\n order = None\n try:\n order = Order.objects.create(\n full_name=billing_details.name,\n email=billing_details.email,\n phone_number=billing_details.phone,\n street_address1=billing_details.address.line1,\n street_address2=billing_details.address.line2,\n town_or_city=billing_details.address.city,\n county=billing_details.state,\n country=billing_details.country,\n original_bag=bag,\n stripe_pid=pid,\n )\n for workshop_id, quantity in json.loads(bag).items():\n workshop = Workshop.objects.get(id=workshop_id)\n if isinstance(quantity, int):\n order_line_item = OrderLineItem(\n order=order,\n workshop=workshop,\n quantity=quantity,\n )\n order_line_item.save()\n except Exception as e:\n if order:\n order.delete()\n return HttpResponse(\n content=f'Webhook received: {event[\"type\"]} | ERROR: {e}',\n status=500)\n return HttpResponse(\n content=f'Webhook received: {event[\"type\"]}'\n '| SUCCESS: Created order in webhook',\n status=200)",
"def pre_loop(self, event):\n self.do_sync()",
"async def _check_order_update(self, *args, **kwargs):\n order_nos = list(self._orders.keys())\n if not order_nos:\n return\n for order_no in order_nos:\n success, error = await self._rest_api.get_order_status(order_no)\n if error:\n return\n await self._update_order(success[\"data\"][0])",
"def action_payslip_done(self):\n for recd in self.late_check_in_ids:\n recd.state = 'deducted'\n return super(PayslipLateCheckIn, self).action_payslip_done()",
"def process_updates():\n print \"[{x}] Processing Requests\".format(x=dates.now())\n WorkflowApi.process_requests()\n WorkflowApi.process_enhancements()",
"async def process(self, msg):\n logger.debug(\"msg:\", json.dumps(msg), caller=self)\n e = msg.get(\"e\")\n if e == \"executionReport\": # Order update.\n if msg[\"s\"] != self._raw_symbol:\n return\n order_no = \"{}_{}\".format(msg[\"i\"], msg[\"c\"])\n if msg[\"X\"] == \"NEW\":\n status = ORDER_STATUS_SUBMITTED\n elif msg[\"X\"] == \"PARTIALLY_FILLED\":\n status = ORDER_STATUS_PARTIAL_FILLED\n elif msg[\"X\"] == \"FILLED\":\n status = ORDER_STATUS_FILLED\n elif msg[\"X\"] == \"CANCELED\":\n status = ORDER_STATUS_CANCELED\n elif msg[\"X\"] == \"REJECTED\":\n status = ORDER_STATUS_FAILED\n elif msg[\"X\"] == \"EXPIRED\":\n status = ORDER_STATUS_FAILED\n else:\n logger.warn(\"unknown status:\", msg, caller=self)\n return\n order = self._orders.get(order_no)\n if not order:\n info = {\n \"platform\": self._platform,\n \"account\": self._account,\n \"strategy\": self._strategy,\n \"order_no\": order_no,\n \"action\": msg[\"S\"],\n \"order_type\": msg[\"o\"],\n \"symbol\": self._symbol,\n \"price\": msg[\"p\"],\n \"quantity\": msg[\"q\"],\n \"ctime\": msg[\"O\"]\n }\n order = Order(**info)\n self._orders[order_no] = order\n order.remain = float(msg[\"q\"]) - float(msg[\"z\"])\n order.status = status\n order.utime = msg[\"T\"]\n if self._order_update_callback:\n SingleTask.run(self._order_update_callback, copy.copy(order))",
"def pre_step(self):\n\n self.reward = 0",
"def receive_reload_request(self, _: EmptyMsg):\n self.update()",
"def on_update(self):\n if self.get('update_request') and not self.is_pending_approval():\n if self.is_revert:\n self.set_as_reverted()\n else:\n self.set_as_success()",
"def proceed(self):\n pass",
"def precheck(ctx):\n rc = run_playbook(precheck_cmd(ctx.obj))\n if rc != 0:\n print_error_msg(\"Upgrade prechecks failed!!!\")\n sys.exit(1)\n print_success_msg(\"Upgrade prechecks ran successfully\")",
"def fetch(self):\r\n if self.wp_op is None: # If we were already doing a list or save, just restart the fetch without changing the operation\r\n self.wp_op = \"fetch\"\r\n self.master.waypoint_request_list_send()",
"def _check_unstake_result(self) -> None:\n balance_score = self.icx.get_balance(self.address) - self._daily_reward.get()\n if balance_score > 0:\n unstake_info_list = self.getUserUnstakeInfo()\n for each_info in unstake_info_list:\n value_to_transfer = each_info[0]\n if value_to_transfer <= balance_score:\n self._send_ICX(each_info[1], value_to_transfer)\n self._linked_list_var.remove(self._linked_list_var._head_id.get())\n break",
"def run(self):\n\n if self.has_unsent_data:\n if self.pms.set_data(self.unsent_data[0], self.unsent_data[1]):\n self.has_unsent_data = False\n return\n if self.has_data:\n temp = self.mint.get_highest_pri(self.data[1])\n if temp == \"empty\": # hardcoded response\n temp = [self.data[0], \"tudulu\"] # tudulu has no meaning, placeholder at most\n if temp[0] == int(self.data[0]): # if answer piazzaid is equal to current piazzaid, then ignore\n self.has_data = False\n return\n if not self.pms.set_data(self.data[0], temp[0]):\n self.has_unsent_data = True\n self.unsent_data = [self.data[0], temp[0]]\n self.has_data = False"
] | [
"0.6297387",
"0.52683717",
"0.51722956",
"0.51680756",
"0.5077127",
"0.50706697",
"0.5068419",
"0.50682193",
"0.5056422",
"0.5022585",
"0.4991616",
"0.4961541",
"0.49071103",
"0.49062324",
"0.48711935",
"0.4865397",
"0.48612118",
"0.48573983",
"0.48536915",
"0.4841989",
"0.48383233",
"0.48313102",
"0.48294592",
"0.4828004",
"0.48181468",
"0.4817654",
"0.4811173",
"0.48096144",
"0.4801211",
"0.47772542"
] | 0.7185964 | 0 |
Continue getting updates until a SuccessfulPayment is received. | def __wait_for_successfulpayment(self,
cancellable: bool = False) -> Union[telegram.SuccessfulPayment, CancelSignal]:
log.debug("Waiting for a SuccessfulPayment...")
while True:
# Get the next update
update = self.__receive_next_update()
# If a CancelSignal is received...
if isinstance(update, CancelSignal):
# And the wait is cancellable...
if cancellable:
# Return the CancelSignal
return update
else:
# Ignore the signal
continue
# Ensure the update contains a message
if update.message is None:
continue
# Ensure the message is a successfulpayment
if update.message.successful_payment is None:
continue
# Return the successfulpayment
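            # (the SuccessfulPayment service message carries the paid amount, currency, invoice payload and the charge ids)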
return update.message.successful_payment | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def awaiting_payment(self):",
"def webhook_payment_successful(self, event):\n\n intent = event.data.object\n p_id = intent.id\n pack = intent.metadata.pack\n save_detail = intent.metadata.save_detail\n\n billing_details = intent.charges.data[0].billing_details\n shipping_details = intent.shipping\n grand_cost = round(intent.charges.data[0].amount / 100, 2)\n\n for field, value in shipping_details.address.items():\n if value == \"\":\n shipping_details.address[field] = None\n\n profile = None\n username = intent.metadata.username\n if username != 'AnonymousUser':\n profile = UserProfile.objects.get(user__username=username)\n if save_detail:\n profile.default_phone_number = shipping_details.phone,\n profile.default_home_Address = shipping_details.address.line1,\n profile.default_home_Address_continued = \\\n shipping_details.address.line2,\n profile.default_postcode = \\\n shipping_details.address.postal_code,\n profile.default_county = \\\n shipping_details.address.city,\n profile.default_country = \\\n shipping_details.address.country,\n profile.save()\n\n order_present = False\n seek = 1\n while seek <= 6:\n try:\n order = Order.objects.get(\n Name__iexact=shipping_details.name,\n user_account=profile,\n email__iexact=billing_details.email,\n phone_number__iexact=shipping_details.phone,\n home_Address__iexact=shipping_details.address.line1,\n home_Address_continued__iexact =(\n shipping_details.address.line2\n ),\n postcode__iexact=shipping_details.address.postal_code,\n county__iexact=shipping_details.address.city,\n country__iexact=shipping_details.address.country,\n grand_cost=grand_cost,\n original_pack=pack,\n stripe_p_id=p_id,\n )\n order_present = True\n break\n except Order.DoesNotExist:\n seek += 1\n time.sleep(1)\n if order_present:\n self._send_email_details(order)\n return HttpResponse(\n content=f'Webhook obtained: {event[\"type\"]} | Good news. \\\n This is now in the database',\n status=200)\n else:\n order = None\n try:\n order = Order.objects.create(\n Name=shipping_details.name,\n email=billing_details.email,\n phone_number=shipping_details.phone,\n home_Address=shipping_details.address.line1,\n home_Address_continued=shipping_details.address.line2,\n postcode=shipping_details.address.postal_code,\n county=shipping_details.address.city,\n country=shipping_details.address.country,\n original_pack=pack,\n stripe_p_id=p_id,\n )\n for item_id, item_data in json.load(pack).items():\n product = Product.objects.get(id=item_id)\n if isinstance(item_data, int):\n order_line_item = OrderLineItem(\n order=order,\n product=product,\n quantity=item_data,\n )\n order_line_item.save()\n else:\n for size, quantity in item_data['items_by_size'].items():\n order_line_item = OrderLineItem(\n order=order,\n product=product,\n quantity=quantity,\n product_size=size,\n )\n order_line_item.save()\n except Exception as e:\n if order:\n order.delete()\n return HttpResponse(\n content=f'Webhook obtained: {event[\"type\"]} | \\\n There is an error: {e}',\n status=500)\n self._send_email_details(order)\n return HttpResponse(\n content=f'Webhook obtained: {event[\"type\"]} | \\\n Goodnews: webhook order created',\n status=200)",
"def completed(payment_id):\n epay = PaymentProcessor.epay\n EpayPayment = apps.get_model('epay', 'EpayPayment')\n with transaction.atomic():\n epay_payment = EpayPayment.objects.select_related('payment').get(payment_id=payment_id)\n payment = epay_payment.payment\n epay.capture(\n payment_id, payment.amount, epay_payment.approval_code,\n epay_payment.reference, currency=payment.currency)\n\n # epay_payment.change_status(\"caputred\")\n epay_payment.update_from_kkb()\n return epay_payment",
"def _onSuccess(self, controller):\r\n if controller.order.paid_in_full:\r\n controller.cart.empty()\r\n for item in controller.order.orderitem_set.all():\r\n if item.product.is_subscription:\r\n item.completed = True\r\n item.save()\r\n try:\r\n curr_status = controller.order.orderstatus_set.latest() \r\n except OrderStatus.DoesNotExist:\r\n curr_status = None\r\n \r\n if (curr_status is None) or (curr_status.notes and curr_status.status == \"New\"):\r\n controller.order.add_status(status='New', notes = \"Order successfully submitted\")\r\n else:\r\n # otherwise just update and save\r\n if not curr_status.notes:\r\n curr_status.notes = _(\"Order successfully submitted\")\r\n curr_status.save() \r\n\r\n #Redirect to the success page\r\n url = controller.lookup_url('satchmo_checkout-success')\r\n return HttpResponseRedirect(url) \r\n\r\n else:\r\n log.debug('Order #%i not paid in full, sending to pay rest of balance', controller.order.id)\r\n #url = controller.order.get_balance_remaining_url()\r\n url = reverse('satchmo_balance_remaining')\r\n return HttpResponseRedirect(url)",
"def paynow_return(request, payment_id):\r\n # Get payment object\r\n payment = get_object_or_404(PaynowPayment, reference=payment_id)\r\n # Init Paynow oject. The urls can now be blank\r\n paynow = Paynow(settings.PAYNOW_INTEGRATION_ID, settings.PAYNOW_INTEGRATION_KEY, '', '')\r\n\r\n # Check the status of the payment with the paynow server\r\n payment_result = paynow.check_transaction_status(payment.poll_url)\r\n\r\n save_changes = False\r\n\r\n # check if status has changed\r\n if payment.status != payment_result.status:\r\n payment.status = payment_result.status\r\n save_changes = True\r\n\r\n # Check if paynow reference has changed\r\n if payment.paynow_reference != payment_result.paynow_reference:\r\n payment.paynow_reference = payment_result.paynow_reference\r\n save_changes = True\r\n\r\n # Check if payment is now paid\r\n print(payment_result.paid)\r\n if payment_result.paid:\r\n if not payment.paid:\r\n payment.paid = True\r\n payment.confirmed_at = timezone.now()\r\n\r\n if save_changes:\r\n payment.save()\r\n\r\n msg = \"Payment for Transaction \" + payment.reference + ' confirmed'\r\n msg += \" Paynow Reference: \" + payment.paynow_reference\r\n messages.success(request, msg)\r\n msg = \"Paynow Payment status => \" + payment.status\r\n messages.success(request, msg)\r\n\r\n\r\n\r\n\r\n return redirect(reverse('index'))",
"def run_now(request):\n rp_id = request.POST.get('rp_id')\n rp = get_object_or_404(RecurringPayment, pk=rp_id)\n\n result_data = {}\n result_data['processed'] = 'false'\n result_data['reason'] = 'done'\n\n payment_profiles = PaymentProfile.objects.filter(\n customer_profile_id=rp.customer_profile_id,\n status=True,\n status_detail='active'\n ).order_by('-update_dt')\n if not payment_profiles:\n valid_cpp_ids, invalid_cpp_ids = rp.populate_payment_profile()\n #print valid_cpp_ids, invalid_cpp_ids\n\n if valid_cpp_ids:\n payment_profiles = PaymentProfile.objects.filter(\n customer_profile_id=valid_cpp_ids[0])\n\n if not payment_profiles:\n result_data['reason'] = 'not setup'\n else:\n if rp.status_detail == 'active':\n num_processed = run_a_recurring_payment(rp)\n if num_processed:\n result_data['processed'] = 'true'\n result_data['reason'] = 'processed'\n # get total_paid and balance for this rp\n result_data['total_paid'] = str(rp.total_paid)\n result_data['balance'] = str(rp.get_outstanding_balance())\n\n # get total amount received for all rps\n d = RecurringPaymentInvoice.objects.filter(\n invoice__balance=0,\n ).aggregate(total_amount_received=Sum('invoice__total'))\n result_data['total_amount_received'] = d['total_amount_received']\n if not result_data['total_amount_received']:\n result_data['total_amount_received'] = 0\n result_data['total_amount_received'] = tcurrency(result_data['total_amount_received'])\n\n return HttpResponse(simplejson.dumps(result_data))",
"def paynow_update(request, payment_reference):\r\n\r\n # Get saved paymend details\r\n payment = get_object_or_404(PaynowPayment, reference=payment_reference)\r\n # Init paynow object. The URLS can be blank\r\n paynow = Paynow(settings.PAYNOW_INTEGRATION_ID, settings.PAYNOW_INTEGRATION_KEY, '', '')\r\n # Check the status of the payment with paynow server\r\n payment_result = paynow.check_transaction_status(payment.poll_url)\r\n\r\n save_changes = False\r\n\r\n # check if status has changed\r\n if payment.status != payment_result.status:\r\n payment.status = payment_result.status\r\n save_changes = True\r\n\r\n # Check if paynow reference has changed\r\n if payment.paynow_reference != payment_result.paynow_reference:\r\n payment.paynow_reference = payment_result.paynow_reference\r\n save_changes = True\r\n\r\n # Check if payment is now paid\r\n if payment_result.paid:\r\n if not payment.paid:\r\n payment.paid = True\r\n payment.confirmed_at = timezone.now()\r\n\r\n if save_changes:\r\n payment.save()\r\n\r\n return HttpResponse('ok')",
"def payment_success(request):\r\n\tsecret_key = settings.SELLER_KEY\r\n\tpid = request.GET['pid']\r\n\tref = request.GET['ref']\r\n\tresult = request.GET['result']\r\n\t# Retrieve the cheksum value and validate it\r\n\tchecksumstr = \"pid={}&ref={}&result={}&token={}\".format(pid, ref, result, secret_key)\r\n\tm = md5(checksumstr.encode(\"ascii\"))\r\n\tchecksum = m.hexdigest()\r\n\tmalformed = False\r\n\tprint(\"calculated: \" + checksum)\r\n\tprint(\"received: \" + request.GET['checksum'] )\r\n\tif (checksum == request.GET['checksum'] ):\r\n\t\ttransaction = Transaction.objects.get(pk=pid)\r\n\t\ttransaction.state = Transaction.CONFIRMED\r\n\t\ttransaction.reference = ref\r\n\t\tgame = Game.objects.get(id = transaction.game.id)\r\n\t\ttransaction.save()\r\n\t\tinc_purchase = game.purchase_number + 1\r\n\t\tgame.purchase_number = inc_purchase\r\n\t\tgame.save()\r\n\t\tprint(\"about to call success\")\r\n\t\treturn render(request, 'success.html', {'game': game, 'MEDIA_URL': settings.MEDIA_URL, 'malformed': malformed})\r\n\telse:\r\n\t\ttransaction = Transaction.objects.get(pk=pid)\r\n\t\ttransaction.delete()\r\n\t\tmalformed = True\r\n\t\treturn render(request, 'success.html', {\"malformed\": malformed})",
"def handle_payment_intent_succeeded(self, event):\n intent = event.data.object\n pid = intent.id\n bag = intent.metadata.bag\n\n billing_details = intent.charges.data[0].billing_details\n grand_total = round(intent.charges.data[0].amount / 100, 2)\n\n order_exists = False\n attempt = 1\n while attempt <= 5:\n try:\n order = Order.objects.get(\n full_name__iexact=billing_details.name,\n email__iexact=billing_details.email,\n phone_number__iexact=billing_details.phone,\n street_address1__iexact=(\n billing_details.address.line1),\n street_address2__iexact=(\n billing_details.address.line2),\n town_or_city__iexact=billing_details.address.city,\n county__iexact=billing_details.address.state,\n country__iexact=billing_details.address.country,\n grand_total=grand_total,\n original_bag=bag,\n stripe_pid=pid,\n )\n order_exists = True\n break\n except Order.DoesNotExist:\n attempt += 1\n time.sleep(1)\n\n if order_exists:\n return HttpResponse(\n content=f'Webhook received: ({event[\"type\"]}'\n '| SUCCESS: Verified order already in database',\n status=200)\n else:\n order = None\n try:\n order = Order.objects.create(\n full_name=billing_details.name,\n email=billing_details.email,\n phone_number=billing_details.phone,\n street_address1=billing_details.address.line1,\n street_address2=billing_details.address.line2,\n town_or_city=billing_details.address.city,\n county=billing_details.state,\n country=billing_details.country,\n original_bag=bag,\n stripe_pid=pid,\n )\n for workshop_id, quantity in json.loads(bag).items():\n workshop = Workshop.objects.get(id=workshop_id)\n if isinstance(quantity, int):\n order_line_item = OrderLineItem(\n order=order,\n workshop=workshop,\n quantity=quantity,\n )\n order_line_item.save()\n except Exception as e:\n if order:\n order.delete()\n return HttpResponse(\n content=f'Webhook received: {event[\"type\"]} | ERROR: {e}',\n status=500)\n return HttpResponse(\n content=f'Webhook received: {event[\"type\"]}'\n '| SUCCESS: Created order in webhook',\n status=200)",
"def update_payment(self):\r\n update_payment_to_db(self.__payment_id__, self.__camper_id__, self.__camp_id__, self.__payment_date__, self.__paid_amount__)",
"async def create_payment_loop(self):\n while True:\n try:\n if not self.blockchain_state[\"sync\"][\"synced\"]:\n self.log.warning(\"Not synced, waiting\")\n await asyncio.sleep(60)\n continue\n\n if self.pending_payments.qsize() != 0:\n self.log.warning(f\"Pending payments ({self.pending_payments.qsize()}), waiting\")\n await asyncio.sleep(60)\n continue\n\n self.log.info(\"Starting to create payment\")\n\n coin_records: List[CoinRecord] = await self.node_rpc_client.get_coin_records_by_puzzle_hash(\n self.default_target_puzzle_hash, include_spent_coins=False,\n start_height=self.scan_start_height,\n )\n\n if len(coin_records) == 0:\n self.log.info(\"No funds to distribute.\")\n await asyncio.sleep(120)\n continue\n\n total_amount_claimed = sum([c.coin.amount for c in coin_records])\n pool_coin_amount = int(total_amount_claimed * self.pool_fee)\n amount_to_distribute = total_amount_claimed - pool_coin_amount\n\n self.log.info(f\"Total amount claimed: {total_amount_claimed / (10 ** 12)}\")\n self.log.info(f\"Pool coin amount (includes blockchain fee) {pool_coin_amount / (10 ** 12)}\")\n self.log.info(f\"Total amount to distribute: {amount_to_distribute / (10 ** 12)}\")\n\n async with self.store.lock:\n # Get the points of each farmer, as well as payout instructions. Here a chia address is used,\n # but other blockchain addresses can also be used.\n points_and_ph: List[\n Tuple[uint64, bytes, bytes]\n ] = await self.store.get_farmer_points_and_payout_instructions()\n total_points = sum([pt for (pt, ph, la) in points_and_ph])\n if total_points > 0:\n mojo_per_point = floor(amount_to_distribute / total_points)\n self.log.info(f\"Paying out {mojo_per_point} mojo / point\")\n\n additions_sub_list: List[Dict] = [\n {\"puzzle_hash\": self.pool_fee_puzzle_hash, \"amount\": pool_coin_amount,\n \"launcher_id\": self.default_target_puzzle_hash, \"points\": 0}\n ]\n for points, ph, launcher in points_and_ph:\n if points > 0:\n additions_sub_list.append(\n {\"puzzle_hash\": ph, \"amount\": points * mojo_per_point, \"launcher_id\": launcher, \"points\": points})\n\n if len(additions_sub_list) == self.max_additions_per_transaction:\n await self.pending_payments.put(additions_sub_list.copy())\n self.log.info(f\"Will make payments: {additions_sub_list}\")\n additions_sub_list = []\n\n if len(additions_sub_list) > 0:\n self.log.info(f\"Will make payments: {additions_sub_list}\")\n await self.pending_payments.put(additions_sub_list.copy())\n\n # keep a snapshot of the points collected by the farmer\n await self.store.snapshot_farmer_points(1)\n\n # Subtract the points from each farmer\n await self.store.clear_farmer_points()\n\n else:\n self.log.info(f\"No points for any farmer. Waiting {self.payment_interval}\")\n\n await asyncio.sleep(self.payment_interval)\n except asyncio.CancelledError:\n self.log.info(\"Cancelled create_payments_loop, closing\")\n return\n except Exception as e:\n error_stack = traceback.format_exc()\n self.log.error(f\"Unexpected error in create_payments_loop: {e} {error_stack}\")\n await asyncio.sleep(self.payment_interval)",
"def paymentComplete(self, id, chargeID):\n return self.__insertOrderHistory(id, \"paid\", {\"stripeChargeID\": chargeID})",
"def update(self):\n try:\n\n # 1 --> Get all the NewValidTransaction(s)\n new_valid_txns = self.frame.get(NewValidTransaction)\n\n for txn in new_valid_txns:\n\n # 2 --> Update the BankingRecord corresponding to Customer that initiated it\n self.atm.update_banking_record(txn, self.frame)\n\n # 3 --> Process the Transaction \n self.atm.process_transaction(txn, self.frame)\n\n # ~ Print based on a cycle count (optional functionality)\n self.atm.print_using_base(10, self.frame, Customer)\n #self.atm.print_using_base(10, self.frame, BankingRecord)\n\n except Exception:\n logger.exception(\"Error: \")",
"def proceed_to_checkout_and_payment(self):\r\n # 1- summary\r\n logger.info('starting wizard with summary')\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, '.cart_navigation a.standard-checkout')))\r\n self.automation.driver.execute_script(\"document.querySelectorAll('.cart_navigation a.standard-checkout')[0]\"\r\n \".click()\")\r\n\r\n # 2-sign in & 3-address\r\n logger.info('2-sign in & 3-address')\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, 'button[name=\"processAddress\"]')))\r\n\r\n self.automation.driver.find_element_by_css_selector('button[name=\"processAddress\"]').click()\r\n\r\n # 4- shipping\r\n logger.info('4- shipping')\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, '#uniform-cgv span')))\r\n\r\n is_checked = self.automation.driver.find_element_by_css_selector('#uniform-cgv span').get_attribute('class')\r\n if not is_checked: # agree\r\n self.automation.driver.execute_script(\"document.querySelectorAll('#cgv')[0].click()\")\r\n\r\n self.automation.driver.find_element_by_css_selector('button[name=processCarrier]').click()\r\n logger.info('agree and confirmed')\r\n\r\n # pay by bank wire\r\n logger.info('pay by bank wire')\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, '.payment_module a')))\r\n\r\n self.automation.driver.find_element_by_css_selector('.payment_module a').click()\r\n\r\n # 5- payment and confirm\r\n logger.info('5- payment and confirm')\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, '#cart_navigation button')))\r\n self.automation.driver.find_element_by_css_selector('#cart_navigation button').click()\r\n\r\n # back to orders\r\n logger.info('back to orders')\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, 'p.cart_navigation .button-exclusive.btn')))\r\n self.automation.driver.find_element_by_css_selector('p.cart_navigation .button-exclusive.btn').click()\r\n\r\n # how many items do you have\r\n time.sleep(1.5)\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, '#order-list tbody tr')))\r\n items = self.automation.driver.find_elements_by_css_selector('#order-list tbody tr')\r\n logger.info(f'You have \"{len(items)}\" at your order')",
"def order_success(self, request):\n order = self.order_from_request(request)\n\n if not order:\n return self.order_new(request)\n\n if not order.balance_remaining:\n self.set_order_on_request(request, order=None)\n\n\n order_data = OrderData.objects.get(order=order)\n o_data = simplejson.loads(order_data.data)\n\n paymentData = {}\n paymentData['delivery_address2'] = o_data['delivery_address2']\n paymentData['billing_address2'] = o_data['billing_address2']\n paymentData['delivery_date'] = o_data['delivery_date']\n paymentData['delivery_state'] = o_data['delivery_state']\n paymentData['billing_state'] = o_data['billing_state']\n paymentData['salutation'] = o_data['salutation']\n paymentData['contact_number'] = o_data['billing_contact_number']\n\n #try:\n oPayment = OrderPayment.objects.get(order=order)\n oPayment.payment_method = o_data['order_payment_method']\n oPayment.data = simplejson.dumps(paymentData)\n oPayment.save()\n #except:\n # pass\n\n \"\"\"\n order update note\n \"\"\"\n notes = o_data['order_notes']\n order.notes = notes\n order.save()\n\n # st_save_helper(request, order)\n\n \"\"\"\n sbid = None\n\n if 'customer_styleboard' in request.session:\n sbid = request.session.get('customer_styleboard').id\n\n if 'personalize_id' in request.session:\n print \"There's a personalize_id\"\n \"\"\"\n\n current_user = User.objects.get(id=int(request.user.id))\n\n if 'ipn_emailed' in o_data and o_data['ipn_emailed']:\n\n pass\n \n else:\n\n emailed = send_email_order(order, current_user, notes, paymentData['contact_number'], self)\n\n logr.info('emailed order confirmation to : %s from order success' % current_user.email)\n\n\n order_data.delete() # not needed after saving to order payment\\\n \n clear_styleboard_session(request)\n\n try:\n del request.session['customer_styleboard']\n del request.session['personalize_id']\n except:\n pass\n\n return self.render(request, 'plata/shop_order_success.html',\n self.get_context(request, {\n 'order': order,\n 'progress': 'success',\n }))",
"def acknowledge_prepayment(self):\n self.acknowledge_payment()",
"def update_payment_status(self):\n payments = Payment.query.filter_by(invoice_id=self.id).all()\n total = 0.0\n for payment in payments:\n total += float(payment.amount)\n\n if total >= self.total:\n self.payment_status = u'paid'\n else:\n self.payment_status = u'unpaid'\n\n db.session.add(self)\n db.session.commit()\n\n return False",
"def proceed(self):\n pass",
"def test_successful_payment(self):\n # Make payment.\n amount = 10\n data = {\n 'from_account': self.from_account.id,\n 'to_account': self.to_account.id,\n 'amount': amount,\n }\n response = self.client.post(\n self.payments_list_url, data=data, format='json'\n )\n self.assertEqual(\n response.status_code, status.HTTP_201_CREATED, response.data\n )\n payment_id = response.data['id']\n self.assertTrue(Payment.objects.filter(pk=payment_id).exists())\n\n # Make sure that payments is available through readonly endpoints.\n response = self.client.get(self.payments_list_url)\n self.assertEqual(\n response.status_code, status.HTTP_200_OK, response.data\n )\n self.assertEqual(len(response.data), 1)\n\n detail_url = reverse('payments:payment-detail', args=[payment_id])\n response = self.client.get(detail_url)\n self.assertEqual(\n response.status_code, status.HTTP_200_OK, response.data\n )\n self.assertEqual(response.data['from_account'], self.from_account.id)\n self.assertEqual(response.data['to_account'], self.to_account.id)\n self.assertEqual(Decimal(response.data['amount']), amount)\n\n # Make sure that funds are moved from \"bob123\" to \"alice456\".\n original_amount = self.from_account.balance\n self.from_account.refresh_from_db()\n self.assertEqual(self.from_account.balance, original_amount - amount)\n original_amount = self.to_account.balance\n self.to_account.refresh_from_db()\n self.assertEqual(self.to_account.balance, original_amount + amount)",
"def notify_payment_success(self, **kwargs):\n return self.notify(\"notify_payment_success\", **kwargs)",
"def post(self, request, *args, **kwargs):\n form = self.get_form()\n\n if form.validate():\n messages.add_message(request, messages.SUCCESS, \"Payment update successfull\")\n return self.form_valid(form)\n else:\n messages.add_message(request, messages.ERROR, \"Payment update failed\")\n return self.form_invalid(form)",
"def update_paypal(sender, **kwargs):\n ipn_obj = sender\n try:\n payment = json.loads(ipn_obj.custom)\n\n # try to get payment. if not exist, exception will be catched\n p = Payment.objects.filter(id=payment.get('id'), token=payment.get('token')).get()\n\n # update payment\n p.method = constants.PAYPAL\n p.ipn = ipn_obj\n p.save()\n\n # if payment is completed, so valid\n if ipn_obj.payment_status == ST_PP_COMPLETED:\n # check correct price , currency and mail\n if int(ipn_obj.mc_gross) == int(p.price.price) and \\\n ipn_obj.mc_currency == 'EUR' and \\\n ipn_obj.business == settings.PAYPAL_RECEIVER_EMAIL:\n # all is OK, update state\n p.state = True\n p.save()\n sendmail_payment_success(p)\n else:\n # TODO: send alert / mail\n return\n except Payment.DoesNotExist:\n # TODO: send alert / mail\n pass\n except:\n # TODO: send alert / mail\n pass",
"def post(self):\n for rec in self:\n amount = rec.amount * (1 if rec.payment_type in (\n 'outbound', 'transfer') else -1)\n is_required = rec.l10n_mx_edi_advance_is_required(amount)\n if is_required:\n rec._l10n_mx_edi_generate_advance(is_required)\n return super(AccountPayment, self).post()",
"def wait_for_update(self):\n while \"updating_db\" in self.status():\n time.sleep(1)",
"def click_submit_payment_button(self):\n self.click(self.submit_payment_locator)\n time.sleep(2)",
"def update(self):\n return self._api.update_payment_gateway(**to_dict(self))",
"def done(self,cr,uid,ids,context=None):\n for record in self.browse(cr, uid, ids, context=context):\n search_result = self.pool.get('payment.enrich').browse(cr, uid,record.enrich_id.id)\n if record.cost < 1 :\n raise osv.except_osv(_('Invalid Action Error'), _('The Entered Cost Is Wrong!'))\n if record.cost > search_result.residual_amount :\n raise osv.except_osv(_('Invalid Action Error'), _('Your Residual Balance Is Less Than Your Cost!'))\n if context:\n if 'owner' in context and 'model_id' in context:\n owner = context['owner']\n owner = int(owner)\n model_id = context['model_id']\n if str(model_id) == 'fleet.vehicle.log.contract':\n model_obj = self.pool.get(model_id)\n model = model_obj.browse(cr, uid, owner, context=context)\n model.write({'state':'open'})\n return self.write(cr, uid, ids, {'state':'done'},context=context)",
"def update_done(self, scan_id):\n pass",
"def post_update(self):\n\t\tlogging.info(\"Beginning\")\n\t\toptions=dict(\n\t\t\tapi_key = self.apiKey\n\t\t)\n\t\tcounter = 0\n\t\tfor key, value in self.field.items():\n\t\t\tif value != None:\n\t\t\t\tcounter += 1\n\t\t\t\toptions[key] = value\n\t\tif counter == 0:\n\t\t\tlogging.error(\"There was nothing to update. Check the field values\")\n\t\t\treturn\n\t\turl = '{ts}update'.format(\n\t\t\tts=self.tsRUL,\n\t\t)\n\t\tlogging.debug(\"Options = \" + str(options))\n\t\ttry:\n\t\t\tresults = requests.post(url, params=options)\n\t\t\tif results.ok != True:\n\t\t\t\tlogging.error(\"The update failed\")\n\t\t\t\treturn False\n\t\texcept:\n\t\t\tlogging.error(\"There was an error trying to update the values\")\n\t\t\treturn False\n\t\tself.clear_field_values()\n\t\treturn True",
"def waitUntilSuccess():"
] | [
"0.7672672",
"0.6573215",
"0.65442157",
"0.6435054",
"0.6081593",
"0.60590357",
"0.60243624",
"0.6005645",
"0.59923875",
"0.5971794",
"0.5949517",
"0.59342396",
"0.5882962",
"0.58690065",
"0.5868896",
"0.5814729",
"0.5801393",
"0.5796031",
"0.5784956",
"0.5747631",
"0.5716991",
"0.5647845",
"0.56191415",
"0.5606736",
"0.559069",
"0.553922",
"0.5526043",
"0.5509949",
"0.5490383",
"0.54833454"
] | 0.67208254 | 1 |
Continue getting updates until a photo is received, then return it. | def __wait_for_photo(self, cancellable: bool = False) -> Union[List[telegram.PhotoSize], CancelSignal]:
log.debug("Waiting for a photo...")
while True:
# Get the next update
update = self.__receive_next_update()
# If a CancelSignal is received...
if isinstance(update, CancelSignal):
# And the wait is cancellable...
if cancellable:
# Return the CancelSignal
return update
else:
# Ignore the signal
continue
# Ensure the update contains a message
if update.message is None:
continue
# Ensure the message contains a photo
if update.message.photo is None:
continue
# Return the photo array
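            # (a list of PhotoSize objects at increasing resolutions; the largest version is normally the last element)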
return update.message.photo | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_img(self):\n img_old = self.img\n #Ensure at least one image has been captured\n attempts = 0\n while img_old == None:\n print(\"Wating to capture first image...\")\n time.sleep(1)\n img_old = self.img\n attempts = attempts + 1\n if attempts == 10:\n raise Exception('No images captured after 10 attempts, aborting.')\n img_new = img_old\n #wait until new image is captured\n print('Waiting for new image...')\n while np.all(img_old == img_new):\n img_new = self.img\n print('New image acquired.')\n \n return img_new",
"def take_photo(self):\n\n status = self.camera.status()\n if status['mode'] != 'still':\n # place camera in snapshot mode\n self.camera.command('mode', 'still')\n\n photo_successful = self.camera.command('record', 'on')\n\n if photo_successful:\n\n # sleep for two seconds so the camera can process\n # and serve the new photo via http\n\n retrieved = False\n while not retrieved:\n print(\"Waiting for image to be served.\")\n time.sleep(2)\n retrieved = self.get_photos_from_device()\n\n print(\"Image got served.\")\n return True\n\n else:\n return False",
"def on_photo(self, update, context):\n user = update.effective_user\n photo_count = len(update.message.photo)\n log.info(\n \"PIC from %s, %s, @%s, #%i\",\n user.username,\n user.full_name,\n update.effective_chat.id,\n photo_count,\n )\n\n if context.user_data[\"state\"] != c.State.EXPECTING_RECEIPT:\n # Got an image from someone we weren't expecting to send any. We log this, and TODO decide what\n log.debug(\"Got image when I was not expecting one\")\n return\n\n # Process each photo\n for entry in update.message.photo:\n raw_image = entry.get_file().download_as_bytearray()\n\n # At this point the image is in the memory\n with NamedTemporaryFile(delete=False, prefix=str(update.effective_chat.id)) as pic:\n pic.write(raw_image)\n log.debug(\"Image written to %s\", pic.name)\n\n # Note: you can disable this line when testing locally, if you don't have an actual backend that will\n # serve this request\n self.backend.upload_shopping_receipt(raw_image, context.user_data[\"current_request\"])\n\n # if we got this far it means that we're ready to proceed to the exit survey and ask some additional questions\n # about this request\n self.send_exit_survey(update, context)\n context.user_data[\"state\"] = c.State.EXPECTING_EXIT_SURVEY",
"async def async_camera_image(self) -> bytes:\n websession = async_get_clientsession(self.hass)\n\n with async_timeout.timeout(10):\n response = await websession.get(self._latest_url)\n\n image = await response.read()\n return image",
"def get_photo(self, photo_id):\n uri = 'photos/' + photo_id\n return self.make_request(uri)",
"def _telegram_photo_callback(self, update: Update, _: CallbackContext):\n rospy.logdebug(\"Received image, downloading highest resolution image ...\")\n byte_array = update.message.photo[-1].get_file().download_as_bytearray()\n rospy.logdebug(\"Download complete, publishing ...\")\n\n img = cv2.imdecode(np.asarray(byte_array, dtype=np.uint8), cv2.IMREAD_COLOR)\n msg = self._cv_bridge.cv2_to_imgmsg(img, encoding=\"bgr8\")\n msg.header.stamp = rospy.Time.now()\n\n if self._caption_as_frame_id:\n msg.header.frame_id = update.message.caption\n self._from_telegram_image_publisher.publish(msg)\n\n if update.message.caption:\n self._from_telegram_string_publisher.publish(String(data=update.message.caption))",
"async def async_camera_image(self):\n last_image = self._nvr.get_snapshot_image(self._uuid)\n self._last_image = last_image\n return self._last_image",
"def get_photo(self, i):\r\n return self.__photos[i]",
"def photo(update: Update, context: CallbackContext) -> int:\n user = update.message.from_user\n photo_file = update.message.photo[-1].get_file()\n x = \".jpg\"\n z = user.first_name + x\n photo_file.download(z)\n logger.info(\"Photo of %s: %s\", user.first_name, 'user_photo.jpg')\n update.message.reply_text(\n 'Che bella foto! ora, mandami la tua posizione se puoi, o scrivi /skip se non vuoi farlo.'\n )\n\n return LOCATION",
"def read(self):\n with self.lock:\n return self.image",
"def get(self):\n\t\tif not self.threaded:\n\t\t\tself.record()\n\t\timg = self.Video[-1]\n\t\ttime = self.timestamps[-1]\n\t\tif self.newAvailable:\n\t\t\tnew = True\n\t\t\tself.newAvailable = False\n\t\t\treturn new, img, time\n\t\telse:\n\t\t\tnew = False\n\t\t\treturn new, img, time",
"def __next__(self):\n while True:\n self.stream_bytes += self.stream_conn.read(1024)\n first = bytearray(self.stream_bytes).find(b'\\xff\\xd8')\n last = bytearray(self.stream_bytes).find(b'\\xff\\xd9')\n if first != -1 and last != -1:\n jpg = self.stream_bytes[first:last + 2]\n self.stream_bytes = self.stream_bytes[last + 2:]\n image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), 0)\n self.total_frame += 1\n return image",
"def __photo_handler(self, update, context):\n trigger = Constructor.PHOTO_TRIGGER\n self.__handler(context, update, trigger)",
"def update(self):\n while not self.stopped:\n time.sleep(0.01)\n self.grab_image()",
"def grab_image(self):\n _, camera_image = self.camera.read()\n with self.lock:\n self.image = camera_image",
"async def _async_request_image(\n self, request_method: Callable[[], Coroutine[Any, Any, None]]\n ) -> bytes | None:\n if not self.available:\n return None\n image_future = self._loop.create_future()\n self._image_futures.append(image_future)\n await request_method()\n if not await image_future:\n return None\n return self._state.data",
"def command_photo(self, bot, update):\n\n self.send_message(bot, update, \"Not implemented yet.\")",
"def send_photo_url(self, bot, update, url):\n\n resp = requests.get(url)\n return self.send_photo(bot, update, StringIO(resp.content))",
"def camera_image(self):\n return asyncio.run_coroutine_threadsafe(\n self.async_camera_image(), self.hass.loop\n ).result()",
"async def grab(self):\r\n # TODO probe the system for optimal size\r\n await self.configure_acquisition(100, continuous=True)\r\n\r\n self.start_acquisition()\r\n with trio.CancelScope():\r\n while True:\r\n yield await self.get_image(mode=BufferRetrieveMode.Latest, copy=False)\r\n self.stop_acquisition()\r\n\r\n await self.unconfigure_acquisition()",
"def fetch(hashed_image_path, info, resp, lock=None):\n\n fetched = 0\n info['version'] += 1\n info['fetched_at'] = email.utils.formatdate()\n for field in VALIDATED_IMAGE_FIELDS:\n info[field] = resp.headers.get(field)\n\n last_lock_refresh = 0\n with open(hashed_image_path + '.v%03d' % info['version'], 'wb') as f:\n for chunk in resp.iter_content(chunk_size=8192):\n fetched += len(chunk)\n f.write(chunk)\n if lock and (time.time() - last_lock_refresh > 10):\n lock.refresh()\n last_lock_refresh = time.time()\n\n if fetched > 0:\n with open(hashed_image_path + '.info', 'w') as f:\n f.write(json.dumps(info, indent=4, sort_keys=True))\n\n LOG.info('Fetching image %s complete (%d bytes)' %\n (info['url'], fetched))\n\n # Decompress if required\n if info['url'].endswith('.gz'):\n if not os.path.exists(hashed_image_path + '.v%03d.orig' % info['version']):\n if lock:\n lock.refresh()\n\n processutils.execute(\n 'gunzip -k -q -c %(img)s > %(img)s.orig' % {\n 'img': hashed_image_path + '.v%03d' % info['version']},\n shell=True)\n return '%s.v%03d.orig' % (hashed_image_path, info['version'])\n\n return '%s.v%03d' % (hashed_image_path, info['version'])",
"def __image_request_handler(self):\n self.__logger.info(\"Image Request Handling Thread started\")\n ticker = Ticker(2)\n while self._running:\n timeout = ticker.end_tick(False)\n try:\n task = self.__image_queue.get(True, timeout)\n except Queue.Empty:\n ticker.start_tick()\n continue\n\n # There is a task to process\n ticker.start_tick()\n source, connection = task\n\n # Check if the connection has been closed. If it was,\n # do not bother processing the request.\n if not connection.connected():\n self.__logger.info(\"Skipping request for image of source %s\" \\\n \" because requesting client disconnected\" \\\n % source)\n self.__image_queue.task_done()\n continue \n\n # Obtain new image\n error = \"No image available\"\n image = None\n mtime = time.time()\n if source in self.__video_modules:\n try:\n mtime, image = self.__get_image(source)\n except Exception as err:\n error = \"Obtaining image failed: %s\" % repr(err)\n else:\n error = \"Video source %s has not been started\" % source\n\n if connection.connected():\n if image:\n # Valid image was obtained\n img_str = image.tostring()\n data = {'name': 'image',\n 'source': source,\n 'time': mtime,\n 'shape': (image.width, image.height),\n 'depth': image.depth,\n 'nChannels': image.nChannels}\n else:\n # An error occured, notify the vision module\n self.__logger.info(\"Failed to obtain image for source %s. \"\\\n \" Error message: %s\" % (source, error))\n img_str = \"\"\n data = {'name': 'image',\n 'source': source,\n 'time': mtime,\n 'error': error}\n # Send the data to the vision module.\n if not connection.sendall(data, img_str):\n self.__logger.warning(\"Failed to send data to client. \" \\\n \"Probably disconnected\")\n else:\n self.__logger.info(\"Image of source %s obtained but not \" \\\n \"sending because requesting client \" \\\n \"disconnected\" % source)\n self.__image_queue.task_done()\n self.__logger.info(\"Image Request Handling Thread ended\")",
"def update_completed(self, update : int) -> NoReturn:\n\n self.mutex_downloaded.acquire()\n self.downloaded_images += update\n self.mutex_downloaded.release()",
"def __get_img(self):\n # Read camera image\n while True:\n # Wait for prediction\n if not self.__predict_start:\n continue\n\n # Get current frame and\n # check for success\n success, self.__img = self.__cap.read()\n if not success:\n continue\n\n self.__img = cv2.resize(self.__img, (self.__size[0], self.__size[1]))",
"def take_photo(self):\n self.photo = self.frame\n self.send_photo_to_model.emit(self.photo)",
"def img_ok(update, send_msg=True):\n if update.message.document:\n if not update.message.document.mime_type.startswith(\"image\"):\n if send_msg:\n status_usr_msg(update=update, status='invalid', obj='image')\n img = None\n else:\n img = update.message.document\n else:\n img = update.message.photo[-1]\n if img:\n if img.file_size > MAX_FILESIZE_DOWNLOAD:\n if send_msg:\n status_usr_msg(update, 'too large', 'image')\n img = None\n if img and send_msg:\n status_usr_msg(update, 'ok', 'image')\n return img",
"def get_latest_valid_picture(self):\n return self.buffer[self.buffer_index]",
"def get_new_image(self):\n return self.vid_mem_reader.get_latest_image()[0]",
"def __get_image(self, source):\n if not source in self.__video_modules:\n return (None, None)\n with self.__video_locks[source]:\n last_time, last_img = self.__last_images[source]\n age = time.time() - last_time\n if age > 0.05:\n new_image = self.__video_modules[source].get_image()\n try:\n new_time = self.__video_modules[source].get_time()\n print \"Got time from ros: %f\" % new_time\n except:\n new_time = time.time()\n\n if new_image:\n last_time = new_time \n last_img = new_image\n self.__last_images[source] = (new_time, new_image)\n return (last_time, last_img)",
"def camera_acquire_image():\n from time import time\n from PIL import Image # Python Imaging Library\n \n camera = GigE_camera(parameter(\"camera.IP_addr\"),use_multicast=use_multicast)\n camera.last_timestamp = 0\n\n camera.start() \n t = time()\n while not camera.has_image or camera.timestamp == 0:\n if time()-t > 2.0 and not \"started\" in camera.state:\n log (\"camera_acquire_image: image unreadable (%s)\" % camera.state); break\n if time()-t > 5.0:\n log (\"camera_acquire_image: image acquistion timed out (%s)\" % camera.state); break\n sleep(0.1)\n\n camera.stop()\n\n debug(\"get_image: read image with %dx%d pixels, %d bytes\" %\n (camera.width,camera.height,len(camera.rgb_data)))\n image = Image.new('RGB',(camera.width,camera.height))\n image.fromstring(camera.rgb_data)\n image = rotated_image(image)\n return image"
] | [
"0.65705794",
"0.6327374",
"0.63064396",
"0.60424834",
"0.5937122",
"0.5898389",
"0.5863462",
"0.582088",
"0.58116835",
"0.57911056",
"0.5773972",
"0.57718194",
"0.5729892",
"0.57232875",
"0.5681439",
"0.5619149",
"0.5587965",
"0.5579192",
"0.5574852",
"0.5565938",
"0.5523706",
"0.54924494",
"0.5484169",
"0.54731137",
"0.5447273",
"0.54470116",
"0.54371655",
"0.5423838",
"0.5422946",
"0.5421332"
] | 0.78513974 | 0 |
Continue getting updates until an inline keyboard callback is received, then return it. | def __wait_for_inlinekeyboard_callback(self, cancellable: bool = False) \
-> Union[telegram.CallbackQuery, CancelSignal]:
log.debug("Waiting for a CallbackQuery...")
while True:
# Get the next update
update = self.__receive_next_update()
# If a CancelSignal is received...
if isinstance(update, CancelSignal):
# And the wait is cancellable...
if cancellable:
# Return the CancelSignal
return update
else:
# Ignore the signal
continue
# Ensure the update is a CallbackQuery
if update.callback_query is None:
continue
# Answer the callbackquery
self.bot.answer_callback_query(update.callback_query.id)
# Return the callbackquery
return update.callback_query | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def detectKeyboard(self):\n self.runKeyboard()\n time.sleep(0.2)\n searching = True\n while searching:\n for dev in self.keyboards:\n if self.hitsKeyboards[dev] != False:\n return(dev, self.map(self.hitsKeyboards[dev]))\n time.sleep(0.01)",
"def wm_update(self):\n readback = self.get_pvobj(\"readback\")\n show_pos = self._update_cb(0)\n show_pos()\n with CallbackContext(readback, show_pos):\n try:\n while True:\n time.sleep(0.1)\n except KeyboardInterrupt:\n pass",
"def key_wait():\n while 1:\n for event in get():\n if event.type == 'KEYDOWN':\n return event\n if event.type == 'QUIT':\n # convert QUIT into alt+F4\n return KeyDown('F4', '', True, False, True, False, False)\n _time.sleep(.001)",
"def keypress(self):\n k = self.__screen.getch()\n ret = None\n if k == curses.KEY_ENTER or (k < 256 and chr(k) == '\\n'):\n ret = self.__textPad.gather()\n self.__textWin.clear()\n else:\n self.__textPad.do_command(k)\n\n self.__update()\n return ret",
"def sync(self):\n available = self.count\n if available > 0:\n available = available + 2\n buf = self.read_keypad(available)\n for raw in buf:\n evt = KeyEvent(_seesaw_key((raw >> 2) & 0x3F), raw & 0x3)\n if (\n evt.number < _NEO_TRELLIS_NUM_KEYS\n and self.callbacks[evt.number] is not None\n ):\n self.callbacks[evt.number](evt)",
"def getInput(self):\n\t\tkeyPressed = self.screen.getch()\n\t\tif keyPressed == 113:\t\t# <escape>\n\t\t\tself.terminate()\n\t\t\tself.running = False\n\t\treturn keyPressed \t\t# return key for (possible) further action in calling program",
"def handle_keyboard_data(data):\n pass",
"def get_key():\n\tinput_key: str = \"\"\n\ttry:\n\t\twhile not False:\n\t\t\twith Raw(sys.stdin):\n\t\t\t\tif not select([sys.stdin], [], [], 0.1)[0]: #* Wait 100ms for input on stdin then restart loop to check for stop flag\n\t\t\t\t\tcontinue\n\t\t\t\tinput_key += sys.stdin.read(1) #* Read 1 key safely with blocking on\n\t\t\t\tif input_key == \"\\033\": #* If first character is a escape sequence keep reading\n\t\t\t\t\twith Nonblocking(sys.stdin): #* Set non blocking to prevent read stall\n\t\t\t\t\t\tinput_key += sys.stdin.read(20)\n\t\t\t\t\t\tif input_key.startswith(\"\\033[<\"):\n\t\t\t\t\t\t\t_ = sys.stdin.read(1000)\n\t\t\t\tprint(\"INPUT: \"+input_key.replace(\"\\033\",\"<ESC>\"))\n\t\t\t\tif input_key == \"\\033\" or input_key == \"q\": #* Key is \"escape\" key if only containing \\033\n\t\t\t\t\tbreak\n\t\t\t\telif input_key.startswith((\"\\033[<0;\", \"\\033[<35;\", \"\\033[<64;\", \"\\033[<65;\")): #* Detected mouse event\n\t\t\t\t\ttry:\n\t\t\t\t\t\tprint((int(input_key.split(\";\")[1]), int(input_key.split(\";\")[2].rstrip(\"mM\"))))\n\t\t\t\t\texcept:\n\t\t\t\t\t\tpass\n\t\t\t\t\telse:\n\t\t\t\t\t\tif input_key.startswith(\"\\033[<35;\"):\n\t\t\t\t\t\t\tprint(\"mouse Move\") #* Detected mouse move in mouse direct mode\n\t\t\t\t\t\telif input_key.startswith(\"\\033[<64;\"):\n\t\t\t\t\t\t\tprint(\"mouse Scroll UP\") #* Detected mouse scroll up\n\t\t\t\t\t\telif input_key.startswith(\"\\033[<65;\"):\n\t\t\t\t\t\t\tprint(\"mouse Scroll DOWN\") #* Detected mouse scroll down\n\t\t\t\t\t\telif input_key.startswith(\"\\033[<0;\") and input_key.endswith(\"m\"):\n\t\t\t\t\t\t\tprint(\"mouse Click Release\") #* Detected mouse click release\n\t\t\t\tinput_key = \"\"\n\texcept Exception as e:\n\t\tprint(f'EXCEPTION: Input thread failed with exception: {e}')",
"def user_input(self):\n\n # Above, we set the timeout of getch() on entryscreen to 500ms. That means\n # that the invalid character (-1) is returned every 500 ms if the user\n # enters nothing, and our validator is called. We take this opportunity to\n # relese the curses lock so any other threads (e.g. the message handling\n # thread) have a chance to update the screen. Additionally, we call\n # update() so that any other changes are picked up. We raise _StoppedError\n # to get out of the surrounding loop in edit() so that we can exit this\n # function cleanly and without hijacking any other exceptions (such as\n # KeyboardInterrupt).\n\n class _StoppedError(Exception):\n pass\n\n def validator(ch):\n if ch == curses.KEY_RESIZE:\n self.chatscreen.clear()\n (y, x) = self.global_screen.getmaxyx()\n curses.resizeterm(y, x)\n self.chatscreen.resize(y-Chat.CHATBOX_SIZE, x)\n self.entryscreen.mvwin(y-Chat.CHATBOX_SIZE, 0)\n self.update()\n return None\n try:\n self.curses_lock.release()\n if not self.running:\n raise _StoppedError\n self.update() # has anything changed?\n if ch < 0:\n return None\n return ch\n finally:\n self.curses_lock.acquire()\n\n try:\n self.curses_lock.acquire()\n cmd = self.textpad.edit(validator)\n self.entryscreen.clear()\n except _StoppedError:\n return ''\n finally:\n self.curses_lock.release()\n\n # strip the newlines out of the middle of the words\n cmd = string.replace(cmd, '\\n', '')\n\n # remove unprintable characters\n cmd = (''.join(c if c in string.printable else '' for c in cmd)).strip()\n\n # process commands if necessary\n if cmd.startswith('/'):\n words = cmd.split()\n cmdname = words[0][1:]\n args = words[1:]\n\n if cmdname in self.commands:\n try:\n self.commands[cmdname](*args)\n except CommandError as e:\n self.message('System:', 'Problem executing command: ' + str(e))\n except TypeError as e:\n self.message('System:', str(e))\n else:\n self.message('System:', 'Unknown command: '+cmdname)\n else:\n # it's not a cmd so it must be a message to send\n self.q.put(cmd)\n self.update()",
"def on_feed_key(self, key_press):\n if key_press.key in {Keys.Escape, Keys.ControlC}:\n echo(carriage_return=True)\n raise Abort()\n if key_press.key == Keys.Backspace:\n if self.current_command_pos > 0:\n self.current_command_pos -= 1\n return key_press\n ret = None\n if key_press.key != Keys.CPRResponse:\n if self.current_command_pos < len(self.current_command):\n current_key = self.current_command_key\n ret = KeyPress(current_key)\n increment = min(\n [self.speed, len(self.current_command) - self.current_command_pos]\n )\n self.current_command_pos += increment\n else:\n # Command is finished, wait for Enter\n if key_press.key != Keys.Enter:\n return None\n self.current_command_index += 1\n self.current_command_pos = 0\n ret = key_press\n return ret",
"async def __bufferedReader():\n while True:\n # Get char and then append to prevent a race condition caused by the async await\n charIn = await __terminalState.osSupport.getInputChar()\n\n wasHandled = False\n for key, handlers in __terminalState.inputHandlers.items():\n if key is None or charIn in key:\n for handler in handlers:\n asyncio.get_event_loop().call_soon(handler, charIn)\n wasHandled = True\n\n if not wasHandled:\n __terminalState.inputBuffer += charIn",
"def get_next_signal(self):\n keypress = None\n\n while not keypress:\n #While no keypress received\n self.do_polling()\n if self.stream:\n keypress = self.get_stream()[0]\n time.sleep(0.01)\n\n return keypress",
"def getkey():\n\tglobal _s\n\twhile True:\n\t\te = _s.wait_event()\n\t\tif e.type == pygame.KEYDOWN and len(e.dict[\"unicode\"]) > 0:\n\t\t\treturn e.dict[\"unicode\"]",
"def perform_keyboard_actions(self):\n self.handle_keyboard_input()\n self.grid.next_frame()",
"def get_input():\n return getch()",
"def keyboard(self, *args):\n return _ida_hexrays.Hexrays_Hooks_keyboard(self, *args)",
"def key_press(self):\n self.screen.nodelay(True)\n return self.screen.getch()",
"def onKeyPress(self):\n ch = read(fd, 4)\n if ch == '\\033': # escape\n self.pause()\n elif '\\033' in ch:\n return\n elif '\\t' in ch: # tab\n return\n elif len(self.user_input) >= 80: # too long\n self.user_input[:80]\n return\n elif ch == '\\r': # return\n if self.user_input == \"\":\n return\n command = command_list.match(self.user_input)\n if not command:\n pass\n elif command.group(1):\n self._save(0)\n elif command.group(2):\n self._save()\n elif command.group(3):\n self._save(command.group(4))\n link = self.links.match(self.user_input.lower())\n if link:\n self.reset(link.group(0))\n self.user_input = \"\"\n self.locked += 1\n print '\\033[0m'\n print_loc(' '*80, self.y+5, self.x+2)\n #print_loc(' '*80, self.y+6, 0)\n self.locked -= 1\n elif ch == '\\x7f': # backspace\n if self.user_input == \"\":\n return\n self.user_input = self.user_input[:-1]\n elif ch == ' ': # space\n if self.user_input == \"\":\n return\n elif self.user_input[-1] == ' ':\n return\n self.user_input += ' '\n else: # all else\n self.user_input += ch\n self.locked += 1\n # Highlight valid user input\n if self.links.match(self.user_input.lower()):\n print '\\033[0;96;4m'\n print_loc(self.user_input+'\\033[0;1m < \\033[0m ', self.y + 5, self.x)\n elif command_list.match(self.user_input):\n print '\\033[0;1;92m'\n print_loc(self.user_input+'\\033[0;1m < \\033[0m ', self.y + 5, self.x)\n else:\n print '\\033[0m'\n # Display new user input line\n print_loc(self.user_input+'\\033[0;7m \\033[0m ', self.y + 5, self.x)\n self.locked -= 1",
"def _reply_message(self, update, message, keyboard: List[List[str]] = None, inline_keyboard=False):\n if keyboard is not None:\n if not inline_keyboard:\n update.message.reply_text(message,\n reply_markup=ReplyKeyboardMarkup(\n keyboard=[[self.BACK]] + keyboard,\n one_time_keyboard=True))\n\n else:\n kybd = [[InlineKeyboardButton(lb, callback_data=lb) for lb in lst] for lst in keyboard]\n kybd = InlineKeyboardMarkup(inline_keyboard=kybd)\n update.message.reply_text(message, reply_markup=kybd)\n\n else:\n update.message.reply_text(message, reply_markup=ReplyKeyboardRemove())",
"def ev_keydown(self, event: KeyDown) -> None:",
"def wait_keydown(self):\n while True:\n self.clock.tick(self.fps)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n return\n if event.type == pygame.KEYDOWN:\n return",
"def HandleKeyboardInput(self):\n key = yg.getKeyPress()\n if key == \"Return\":\n self.buttons[len(self.buttons) - 1].Click()",
"def wait_for_input(self):\n pass",
"def wait_for_user_input():\n\n input(\"Pulse ENTER para continuar...\")",
"def receive_next_update(self) -> telegram.Update:\n # Pop data from the queue\n data = \"\"\n try:\n data = self.queue.get(timeout=self.cfg.telegram[\"conversation_timeout\"])\n except queuem.Empty:\n # If the conversation times out, gracefully stop the thread\n self.__graceful_stop(StopSignal(\"timeout\"))\n # Check if the data is a stop signal instance\n if isinstance(data, StopSignal):\n # Gracefully stop the process\n log.debug(\"Waiting for a specific message...\")\n self.__graceful_stop(data)\n # Return the received update\n return data",
"def getKey(self):\n while not rospy.is_shutdown():\n tty.setraw(sys.stdin.fileno())\n select.select([sys.stdin], [], [], 0)\n self.key = sys.stdin.read(1)\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.settings)\n time.sleep(.05)",
"def _on_key_press(self, event):",
"def next_(self, update=0):\n char = super().next_()\n\n if update == 1:\n self.highlight_current()\n elif update == 2:\n self._update_request.emit()\n\n return char",
"def getKeyboardInput(self, keysPressed):\n self.keyboardInput = keysPressed",
"def get_keystrokes(self, event):\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n print(self.m_text)\n self.m_text = ''\n elif event.key == pygame.K_BACKSPACE:\n self.m_text = self.m_text[:-1]\n else:\n self.m_text += event.unicode"
] | [
"0.635834",
"0.61028296",
"0.57839763",
"0.572346",
"0.57095325",
"0.57065344",
"0.56824297",
"0.5674951",
"0.5604688",
"0.5577965",
"0.5554134",
"0.5550242",
"0.5487043",
"0.54515177",
"0.54401964",
"0.54355025",
"0.54113567",
"0.54003197",
"0.53757066",
"0.5371119",
"0.53569514",
"0.5351412",
"0.5341315",
"0.5328239",
"0.5323452",
"0.5321693",
"0.5313282",
"0.53049046",
"0.5303196",
"0.52797073"
] | 0.6559372 | 0 |
Select a user from the ones in the database. | def __user_select(self) -> Union[db.User, CancelSignal]:
log.debug("Waiting for a user selection...")
# Find all the users in the database
users = self.session.query(db.User).order_by(db.User.user_id).all()
# Create a list containing all the keyboard button strings
keyboard_buttons = [[self.loc.get("menu_all_cancel")]]
# Add to the list all the users
for user in users:
keyboard_buttons.append([user.identifiable_str()])
# Create the keyboard
keyboard = telegram.ReplyKeyboardMarkup(keyboard_buttons, one_time_keyboard=True)
# Keep asking until a result is returned
while True:
# Send the keyboard
self.bot.send_message(self.chat.id, self.loc.get("conversation_admin_select_user"), reply_markup=keyboard)
# Wait for a reply
reply = self.__wait_for_regex("user_([0-9]+)", cancellable=True)
# Propagate CancelSignals
if isinstance(reply, CancelSignal):
return reply
# Find the user in the database
user = self.session.query(db.User).filter_by(user_id=int(reply)).one_or_none()
# Ensure the user exists
if not user:
self.bot.send_message(self.chat.id, self.loc.get("error_user_does_not_exist"))
continue
return user | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def select_user(user_id):\n return session.query(User).filter(User.id == user_id).first()",
"def get_user_by_id(self, user_id):\n query = \"SELECT * FROM users WHERE user_id = %s\"\n self.cursor.execute(query,[user_id])\n result = self.cursor.fetchone()\n return result",
"def select_user(users):\n\n # Construct the model.\n model = [_User(name=name, description=description)\n for name, description in users]\n\n # Construct the view.\n view = _UsersView(model=model)\n\n if view.configure_traits() and view.selection is not None:\n user = view.selection.name, view.selection.description\n else:\n user = '', ''\n\n return user",
"def get_user(self):\n try:\n cursor = self.db.cursor()\n cursor.execute(\"SELECT username FROM users WHERE username=?\", (self.username,))\n return cursor.fetchall()\n except:\n print(\"Error obteniendo usuario\")",
"def user_by_id(self, user_id):\n\n cur = self.db.cursor()\n cur.execute(\n \"\"\"SELECT user_id, username, password, phone, email, role\n FROM users WHERE user_id = %s\"\"\", (user_id, ))\n \n user_from_db = cur.fetchone()\n if cur.rowcount == 1: \n user_id, username, password, phone, email, role = user_from_db\n resp = dict(user_id=user_id, username=username, password=password, phone=phone, email=email, role=role)\n \n return resp\n return None",
"def get_user_list(self):\n self.user_list = db.get_user_list()\n for each in self.user_list:\n print each[1] # username\n while(True):\n selection = raw_input(\"Enter username to use\")\n if selection in self.user_list:\n return selection",
"def get_user(self, key_value):\n if utils.is_numeric(key_value):\n key_value = int(key_value)\n query = TABELLE['users']['select']['from_id']\n else:\n if key_value[0] == '@':\n key_value = key_value[1:]\n query = TABELLE['users']['select']['from_username']\n user = self.execute(query, (key_value, key_value))\n return user",
"def load_user():\n\n return User.query.get(int(id))",
"def get_usr (conn, id):\n\n res = []\n\n try:\n csr = conn.cursor()\n\n cmd = \"SELECT * FROM {tbl} WHERE {col1} = {val1};\".\\\n format(tbl = _tbl_users,\n col1 = _tbl_users_col1, val1 = id)\n print(cmd)\n\n csr.execute(cmd)\n\n for row in csr:\n res.append(row)\n\n csr.close()\n\n except Exception as ex:\n print(\"Error - get_usr: {0}\".format(ex))\n rc_err = ex.args[0]\n return rc_err\n\n return rc_ok, res",
"def select(self, user):\n i = 0\n for u in self.list:\n if (u[0] == user):\n self.combo.set_active(i)\n break\n i += 1",
"def load_user(id):\n return Users.query.get(id)",
"def get_one_user():",
"def get_random_user():\n return random.choice(User.query.all())",
"def load_user(id):\n return User.query.get(int(id))",
"def load_user(id):\n return User.query.get(int(id))",
"def load_user(id):\n return User.query.get(int(id))",
"def load_user(id):\n return User.query.get(int(id))",
"def load_user(user_id):\n return Users.query.get(user_id)",
"def get_user_by_id(cur, id) -> str:\n cur.execute(f'''\n SELECT name FROM user WHERE id = {id} ''')\n return cur.fetchone()[0]",
"def get_user(current_user):\n for user in user_db:\n if user['email'] == current_user:\n return user",
"def load_user(id):\n\treturn User.query.get(int(id))",
"def load_user(user_id):\n return models.UserModel.query.get(int(user_id))",
"def load_user(user_id):\r\n return User.query.get(int(user_id))",
"def get_user(school, users_list):\n\n possible_ids = [str(user.id_) for user in users_list]\n manager_view.list_users(users_list)\n chosen_user_id = manager_view.get_id()\n\n if chosen_user_id in possible_ids:\n chosen_user_id = int(chosen_user_id)\n\n for user in users_list:\n if chosen_user_id == user.id_:\n chosen_user = user\n\n return chosen_user\n else:\n ui.print_error_message('No such user')",
"def lookupUser_byID(self, user_id):\n sql = \"SELECT * FROM Users WHERE id='%s'\"\\\n % (user_id)\n res = self.execute(sql)\n reslist = res.fetchall()\n if reslist == []:\n return None\n else:\n return reslist[0]",
"def findUser(username):\n connector = appEngine.connect()\n userId = connector.execute(\"SELECT user.userID FROM user WHERE userName=(?)\", username).fetchone()\n #selectInput = select([user]).where(user.column.userName == username)\n #db.execute(selectInput)\n return userId",
"def load_user(id):\n\n return User.query.get(int(id))",
"def load_user(user_id):\n return User.query.get(int(user_id))",
"def load_user(user_id):\n return User.query.get(int(user_id))",
"def load_user(user_id):\n return User.query.get(int(user_id))"
] | [
"0.75827193",
"0.699098",
"0.6802506",
"0.6733667",
"0.67225665",
"0.6699956",
"0.65688473",
"0.648302",
"0.6470143",
"0.6464637",
"0.6432358",
"0.6409055",
"0.63881916",
"0.6382047",
"0.6382047",
"0.6382047",
"0.6382047",
"0.63656896",
"0.6337928",
"0.6337304",
"0.63207024",
"0.6319926",
"0.62936",
"0.62903285",
"0.62784123",
"0.6274321",
"0.6272118",
"0.6267878",
"0.6267878",
"0.6267878"
] | 0.7315355 | 1 |
User menu to order products from the shop. | def __order_menu(self):
log.debug("Displaying __order_menu")
# Get the products list from the db
products = self.session.query(db.Product).filter_by(deleted=False).all()
# Create a dict to be used as 'cart'
# The key is the message id of the product list
cart: Dict[List[db.Product, int]] = {}
# Initialize the products list
for product in products:
# If the product is not for sale, don't display it
if product.price is None:
continue
# Send the message without the keyboard to get the message id
message = product.send_as_message(w=self, chat_id=self.chat.id)
# Add the product to the cart
cart[message['result']['message_id']] = [product, 0]
# Create the inline keyboard to add the product to the cart
inline_keyboard = telegram.InlineKeyboardMarkup(
[[telegram.InlineKeyboardButton(self.loc.get("menu_add_to_cart"), callback_data="cart_add")]]
)
# Edit the sent message and add the inline keyboard
if product.image is None:
self.bot.edit_message_text(chat_id=self.chat.id,
message_id=message['result']['message_id'],
text=product.text(w=self),
reply_markup=inline_keyboard)
else:
self.bot.edit_message_caption(chat_id=self.chat.id,
message_id=message['result']['message_id'],
caption=product.text(w=self),
reply_markup=inline_keyboard)
# Create the keyboard with the cancel button
inline_keyboard = telegram.InlineKeyboardMarkup([[telegram.InlineKeyboardButton(self.loc.get("menu_all_cancel"),
callback_data="cart_cancel")]])
# Send a message containing the button to cancel or pay
final_msg = self.bot.send_message(self.chat.id,
self.loc.get("conversation_cart_actions"),
reply_markup=inline_keyboard)
# Wait for user input
while True:
callback = self.__wait_for_inlinekeyboard_callback()
# React to the user input
# If the cancel button has been pressed...
if callback.data == "cart_cancel":
# Stop waiting for user input and go back to the previous menu
return
# If a Add to Cart button has been pressed...
elif callback.data == "cart_add":
# Get the selected product, ensuring it exists
p = cart.get(callback.message.message_id)
if p is None:
continue
product = p[0]
# Add 1 copy to the cart
cart[callback.message.message_id][1] += 1
# Create the product inline keyboard
product_inline_keyboard = telegram.InlineKeyboardMarkup(
[
[telegram.InlineKeyboardButton(self.loc.get("menu_add_to_cart"),
callback_data="cart_add"),
telegram.InlineKeyboardButton(self.loc.get("menu_remove_from_cart"),
callback_data="cart_remove")]
])
# Create the final inline keyboard
final_inline_keyboard = telegram.InlineKeyboardMarkup(
[
[telegram.InlineKeyboardButton(self.loc.get("menu_all_cancel"), callback_data="cart_cancel")],
[telegram.InlineKeyboardButton(self.loc.get("menu_done"), callback_data="cart_done")]
])
# Edit both the product and the final message
if product.image is None:
self.bot.edit_message_text(chat_id=self.chat.id,
message_id=callback.message.message_id,
text=product.text(w=self,
cart_qty=cart[callback.message.message_id][1]),
reply_markup=product_inline_keyboard)
else:
self.bot.edit_message_caption(chat_id=self.chat.id,
message_id=callback.message.message_id,
caption=product.text(w=self,
cart_qty=cart[callback.message.message_id][1]),
reply_markup=product_inline_keyboard)
self.bot.edit_message_text(
chat_id=self.chat.id,
message_id=final_msg.message_id,
text=self.loc.get("conversation_confirm_cart",
product_list=self.__get_cart_summary(cart),
total_cost=str(self.__get_cart_value(cart))),
reply_markup=final_inline_keyboard)
# If the Remove from cart button has been pressed...
elif callback.data == "cart_remove":
# Get the selected product, ensuring it exists
p = cart.get(callback.message.message_id)
if p is None:
continue
product = p[0]
# Remove 1 copy from the cart
if cart[callback.message.message_id][1] > 0:
cart[callback.message.message_id][1] -= 1
else:
continue
# Create the product inline keyboard
product_inline_list = [[telegram.InlineKeyboardButton(self.loc.get("menu_add_to_cart"),
callback_data="cart_add")]]
if cart[callback.message.message_id][1] > 0:
product_inline_list[0].append(telegram.InlineKeyboardButton(self.loc.get("menu_remove_from_cart"),
callback_data="cart_remove"))
product_inline_keyboard = telegram.InlineKeyboardMarkup(product_inline_list)
# Create the final inline keyboard
final_inline_list = [[telegram.InlineKeyboardButton(self.loc.get("menu_all_cancel"),
callback_data="cart_cancel")]]
for product_id in cart:
if cart[product_id][1] > 0:
final_inline_list.append([telegram.InlineKeyboardButton(self.loc.get("menu_done"),
callback_data="cart_done")])
break
final_inline_keyboard = telegram.InlineKeyboardMarkup(final_inline_list)
# Edit the product message
if product.image is None:
self.bot.edit_message_text(chat_id=self.chat.id, message_id=callback.message.message_id,
text=product.text(w=self,
cart_qty=cart[callback.message.message_id][1]),
reply_markup=product_inline_keyboard)
else:
self.bot.edit_message_caption(chat_id=self.chat.id,
message_id=callback.message.message_id,
caption=product.text(w=self,
cart_qty=cart[callback.message.message_id][1]),
reply_markup=product_inline_keyboard)
self.bot.edit_message_text(
chat_id=self.chat.id,
message_id=final_msg.message_id,
text=self.loc.get("conversation_confirm_cart",
product_list=self.__get_cart_summary(cart),
total_cost=str(self.__get_cart_value(cart))),
reply_markup=final_inline_keyboard)
# If the done button has been pressed...
elif callback.data == "cart_done":
# End the loop
break
# Create an inline keyboard with a single skip button
cancel = telegram.InlineKeyboardMarkup([[telegram.InlineKeyboardButton(self.loc.get("menu_skip"),
callback_data="cmd_cancel")]])
# Ask if the user wants to add notes to the order
self.bot.send_message(self.chat.id, self.loc.get("ask_order_notes"), reply_markup=cancel)
# Wait for user input
notes = self.__wait_for_regex(r"(.*)", cancellable=True)
# Create a new Order
order = db.Order(user=self.user,
creation_date=datetime.datetime.now(),
notes=notes if not isinstance(notes, CancelSignal) else "")
# Add the record to the session and get an ID
self.session.add(order)
self.session.flush()
# For each product added to the cart, create a new OrderItem
for product in cart:
# Create {quantity} new OrderItems
for i in range(0, cart[product][1]):
order_item = db.OrderItem(product=cart[product][0],
order_id=order.order_id)
self.session.add(order_item)
# Ensure the user has enough credit to make the purchase
credit_required = self.__get_cart_value(cart) - self.user.credit
# Notify user in case of insufficient credit
if credit_required > 0:
self.bot.send_message(self.chat.id, self.loc.get("error_not_enough_credit"))
# Suggest payment for missing credit value if configuration allows refill
if self.cfg.ccard["credit_card_token"] != "" \
and self.cfg.appearance["refill_on_checkout"] \
and self.Price(self.cfg.ccard["min_amount"]) <= \
credit_required <= \
self.Price(self.cfg.ccard["max_amount"]):
self.__make_payment(self.Price(credit_required))
# If afer requested payment credit is still insufficient (either payment failure or cancel)
if self.user.credit < self.__get_cart_value(cart):
# Rollback all the changes
self.session.rollback()
else:
# User has credit and valid order, perform transaction now
self.__order_transaction(order=order, value=-int(self.__get_cart_value(cart))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __products_menu(self):\n log.debug(\"Displaying __products_menu\")\n # Get the products list from the db\n products = self.session.query(db.Product).filter_by(deleted=False).all()\n # Create a list of product names\n product_names = [product.name for product in products]\n # Insert at the start of the list the add product option, the remove product option and the Cancel option\n product_names.insert(0, self.loc.get(\"menu_all_cancel\"))\n product_names.insert(1, self.loc.get(\"menu_add_product\"))\n product_names.insert(2, self.loc.get(\"menu_delete_product\"))\n # Create a keyboard using the product names\n keyboard = [[telegram.KeyboardButton(product_name)] for product_name in product_names]\n # Send the previously created keyboard to the user (ensuring it can be clicked only 1 time)\n self.bot.send_message(self.chat.id, self.loc.get(\"conversation_admin_select_product\"),\n reply_markup=telegram.ReplyKeyboardMarkup(keyboard, one_time_keyboard=True))\n # Wait for a reply from the user\n selection = self.__wait_for_specific_message(product_names, cancellable=True)\n # If the user has selected the Cancel option...\n if isinstance(selection, CancelSignal):\n # Exit the menu\n return\n # If the user has selected the Add Product option...\n elif selection == self.loc.get(\"menu_add_product\"):\n # Open the add product menu\n self.__edit_product_menu()\n # If the user has selected the Remove Product option...\n elif selection == self.loc.get(\"menu_delete_product\"):\n # Open the delete product menu\n self.__delete_product_menu()\n # If the user has selected a product\n else:\n # Find the selected product\n product = self.session.query(db.Product).filter_by(name=selection, deleted=False).one()\n # Open the edit menu for that specific product\n self.__edit_product_menu(product=product)",
"def open_products_page(catalog_menu):\n catalog_menu.open_products_page()",
"def menu(request):\n cart = cartData(request)\n cart_items = cart['cart_items']\n # order = cart['order']\n # items = cart['items']\n # Get all our object\n products = BobaProduct.objects.all()\n # Dictionary to hold our products\n context = {\"products\": products, \"cart_items\": cart_items}\n return render(request, 'store/menu.html', context)",
"def print_product_menu():\r\n print(\"\"\"\r\n Menu\r\n 1 - Display Product Price Inventory\r\n 2 - Add New Product\r\n 3 - Save Session\r\n 4 - Exit Session \r\n \"\"\")",
"def menu_products(self, app: object, entry: str) -> None:\n while True:\n if self.back:\n break\n else:\n self.cmd_products = app.view_prod(entry)\n print(\"-\" * 50)\n for key, element in self.cmd_products.items():\n print(f\"{key} : {element}\")\n entry = input(\n \"\\nEntrer un chiffre pour sélectionner le produit correspondant : \"\n )\n if entry in self.cmd_products:\n if entry == \"0\":\n break\n else:\n self.menu_saving(app, entry)\n else:\n print(\"\\nCommande incorrecte\")",
"def products(request):\n\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n products = Product.objects.all()\n template = \"auctionsmng/products.html\"\n\n context = {\n 'products': products\n }\n\n return render(request, template, context)",
"def __edit_product_menu(self, product: Optional[db.SwimPool] = None):\n log.debug(\"Displaying __edit_product_menu\")\n # Create an inline keyboard with a single skip button\n cancel = telegram.InlineKeyboardMarkup([[telegram.InlineKeyboardButton(self.loc.get(\"menu_skip\"),\n callback_data=\"cmd_cancel\")]])\n # Ask for the product name until a valid product name is specified\n while True:\n # Ask the question to the user\n self.bot.send_message(self.chat.id, self.loc.get(\"ask_product_name\"))\n # Display the current name if you're editing an existing product\n if product:\n self.bot.send_message(self.chat.id, self.loc.get(\"edit_current_value\", value=escape(product.name)),\n reply_markup=cancel)\n # Wait for an answer\n name = self.__wait_for_regex(r\"(.*)\", cancellable=bool(product))\n # Ensure a product with that name doesn't already exist\n if (product and isinstance(name, CancelSignal)) or \\\n self.session.query(db.Product).filter_by(name=name, deleted=False).one_or_none() in [None, product]:\n # Exit the loop\n break\n self.bot.send_message(self.chat.id, self.loc.get(\"error_duplicate_name\"))\n # Ask for the product description\n self.bot.send_message(self.chat.id, self.loc.get(\"ask_product_description\"))\n # Display the current description if you're editing an existing product\n if product:\n self.bot.send_message(self.chat.id,\n self.loc.get(\"edit_current_value\", value=escape(product.description)),\n reply_markup=cancel)\n # Wait for an answer\n description = self.__wait_for_regex(r\"(.*)\", cancellable=bool(product))\n # Ask for the product price\n self.bot.send_message(self.chat.id,\n self.loc.get(\"ask_product_price\"))\n # Display the current name if you're editing an existing product\n if product:\n self.bot.send_message(self.chat.id,\n self.loc.get(\"edit_current_value\",\n value=(str(self.Price(product.price))\n if product.price is not None else 'Non in vendita')),\n reply_markup=cancel)\n # Wait for an answer\n price = self.__wait_for_regex(r\"([0-9]+(?:[.,][0-9]{1,2})?|[Xx])\",\n cancellable=True)\n # If the price is skipped\n if isinstance(price, CancelSignal):\n pass\n elif price.lower() == \"x\":\n price = None\n else:\n price = self.Price(price)\n # Ask for the product image\n self.bot.send_message(self.chat.id, self.loc.get(\"ask_product_image\"), reply_markup=cancel)\n # Wait for an answer\n photo_list = self.__wait_for_photo(cancellable=True)\n # If a new product is being added...\n if not product:\n # Create the db record for the product\n # noinspection PyTypeChecker\n product = db.Product(name=name,\n description=description,\n price=int(price) if price is not None else None,\n deleted=False)\n # Add the record to the database\n self.session.add(product)\n # If a product is being edited...\n else:\n # Edit the record with the new values\n product.name = name if not isinstance(name, CancelSignal) else product.name\n product.description = description if not isinstance(description, CancelSignal) else product.description\n product.price = int(price) if not isinstance(price, CancelSignal) else product.price\n # If a photo has been sent...\n if isinstance(photo_list, list):\n # Find the largest photo id\n largest_photo = photo_list[0]\n for photo in photo_list[1:]:\n if photo.width > largest_photo.width:\n largest_photo = photo\n # Get the file object associated with the photo\n photo_file = self.bot.get_file(largest_photo.file_id)\n # Notify the user that the bot is downloading the image and might be inactive for a while\n 
self.bot.send_message(self.chat.id, self.loc.get(\"downloading_image\"))\n self.bot.send_chat_action(self.chat.id, action=\"upload_photo\")\n # Set the image for that product\n product.set_image(photo_file)\n # Commit the session changes\n self.session.commit()\n # Notify the user\n self.bot.send_message(self.chat.id, self.loc.get(\"success_product_edited\"))",
"def sellMenu(userid, args):\r\n buildSellMenu(userid)",
"def product_management():\n sort_by = request.args.get(\"sort\")\n\n \"\"\"\n Sort method is from https://docs.mongodb.com/manual/reference/\n method/cursor.sort/index.html\n \"\"\"\n if sort_by:\n products = list(mongo.db.products.find().sort(sort_items(sort_by)))\n\n else:\n products = list(mongo.db.products.find().sort('name', 1))\n\n \"\"\"\n Pagination code is from https://gist.github.com/mozillazg/\n 69fb40067ae6d80386e10e105e6803c9\n \"\"\"\n page, per_page, offset = get_page_args(\n page_parameter='page', per_page_parameter='per_page', per_page=10)\n pagination_products = paginate_items(products, offset, per_page)\n pagination = paginate(products, page, per_page)\n\n return render_template(\n \"product_management.html\",\n page_title=\"Product Management\",\n products=pagination_products,\n pagination=pagination)",
"def products():\n\n\treturn render_template(\"products.html\")",
"def manageorder(request, product_slug, template_name=\"merchant/manageorder.html\"):\n postdata = request.POST.copy()\n cart_id = postdata['cart_id']\n customer_jid = postdata['xmpp_jid']\n cart_items2_1_accept = cart.merchant_cart_items2_1_accept(request, \"1\", cart_id)\n product_cache_key = request.path\n # try to get product from cache\n p = cache.get(product_cache_key)\n # if a cache miss, fall back on db query\n if not p:\n p = get_object_or_404(Product.active, slug=product_slug)\n # store item in cache for next time\n cache.set(product_cache_key, p, CACHE_TIMEOUT)\n categories = p.categories.filter(is_active=True)\n page_title = p.name\n meta_keywords = p.meta_keywords\n meta_description = p.meta_description\n venue_id = categories[0].venue.id\n # evaluate the HTTP method, change as needed\n #create the unbound form. Notice the request as a keyword argument\n form = ProductAddToCartForm(request=request, label_suffix=':')\n # assign the hidden input the product slug\n form.fields['product_slug'].widget.attrs['value'] = product_slug\n # set test cookie to make sure cookies are enabled\n request.session.set_test_cookie()\n stats.log_product_view(request, p)\n return render_to_response(template_name, locals(), context_instance=RequestContext(request))",
"def productactivate():\n pass",
"def add_products():\n result = order_obj.add_products(request.forms) \n return result",
"def go_product_ingredients_page(self, driver, product_id):\n pass",
"def option_two():\n if ADD_PRODUCTS == {}:\n print \"\\n**No products availabe**\" #Cannot to buy\n press_enter()\n reset()\n main_menu()\n else:\n ask_if_want()",
"def main_menu_input(self):\n if self.choice_menu == \"1\":\n self.category_choice()\n elif self.choice_menu == \"2\":\n print(fr.FR[9])\n for element in self.substitution_table.get_substitution():\n for substitution in element:\n sub_prod = self.product_table.get_product(substitution)\n print(sub_prod[0][1] + \" - \"\n + sub_prod[0][2] + \" - \"\n + sub_prod[0][3] + \" - \"\n + sub_prod[0][4])\n print(\"\\n\")\n elif self.choice_menu == \"3\":\n self.initialise_bdd()\n self.save_product_bdd()\n elif self.choice_menu == \"4\":\n self.leave_main_menu -= 1",
"def add_to_cart(update, context):\n query = update.callback_query\n bot = context.bot\n # loads json received from callback_data into dictionary\n ids = json.loads(query.data)\n category_id = ids['category_id']\n product_id = ids['product_id']\n\n chat_id = update.effective_chat.id\n user = update.effective_user\n # checks if chat already made an order\n if chat_id in cart:\n # checks if user already made an order\n if user.id in cart[chat_id]:\n # checks user already ordered from category\n if category_id in cart[chat_id][user.id]:\n # checks if user already ordered product\n if product_id in cart[chat_id][user.id][category_id]:\n # increase count how often product was ordered\n cart[chat_id][user.id][category_id][product_id] += 1\n else:\n cart[chat_id][user.id][category_id][product_id] = 1\n else:\n cart[chat_id][user.id][category_id] = {product_id: 1}\n else:\n cart[chat_id][user.id] = {category_id: {product_id: 1}}\n else:\n cart[chat_id] = {user.id: {category_id: {product_id: 1}}}\n\n # option to order more or go back to start menu\n keyboard = [[InlineKeyboardButton(\"order more\", callback_data=str(TWO))],\n [InlineKeyboardButton(\"back to menu\", callback_data=str(ONE))]]\n\n # add last message text to product ordered\n bot.edit_message_text(chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text='Added ' + menu[category_id]['products'][product_id][\n 'name'] + ' to your order!',\n reply_markup=InlineKeyboardMarkup(keyboard))\n\n return SEVENTH",
"def printMenu():\n print(\"\\nBienvenido\")\n print(\"1- Cargar Datos\")\n print(\"2- Cargar Catalogo de peliculas\")\n print(\"3- Buscar productoras\")\n print(\"0- Salir\")",
"def contextMenuEvent(self, e):\n\n count = len(self.selectedObjects())\n menu = QtWidgets.QMenu()\n\n if count:\n self.__menuActions.shows().addAction(menu, \"properties\")\n if count == 1:\n menu.addSeparator()\n self.__menuActions.shows().addAction(menu, \"createSubscription\")\n\n menu.exec_(QtCore.QPoint(e.globalX(), e.globalY()))",
"def post(self):\n args = parser.parse_args()\n return Products().add_product(\n args['name'],\n args['quantity'],\n args['price'],\n args['reorder'])",
"async def shop(self, ctx):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n messages = item_helpers.print_shop(ctx.author.id)\n await self.paginate(ctx, messages)",
"def display_menu(self):\n print(\"\"\"\nLogistic System Menu\n1. Add Vehicles\n2. Add Item To The Cart\n3. Complete The Order\n4. Track The Order\n5. Quit \"\"\")",
"def test_add_product_to_cart(self, driver):\n logging.info(\"Start test case: Continue Shop\")\n data = self.test_data[\"Continue Shop\"][\"Products\"][0]\n logging.info(f\"Test data: [{data}]\")\n product_name = data[\"Product Name\"]\n\n select_product(driver, data[\"Page\"], product_name)\n add_product_to_cart(driver, data[\"Size\"], data[\"Color\"], data[\"Quantity\"])\n assert is_product_in_cart(driver, product_name)\n continue_shopping_from_order_summary(driver)\n assert verify_current_page_is_home(driver)",
"def test_purchase_products(self, driver):\n logging.info(\"Start test case: checkout product successfully\")\n products = self.test_data[\"Purchase Products\"][\"Products\"]\n address = self.test_data[\"Purchase Products\"][\"Address\"]\n payment_info = self.test_data[\"Purchase Products\"][\"Payment Info\"]\n logging.info(f\"Test Data: {self.test_data['Purchase Products']}\")\n\n select_product(driver, products[0][\"Page\"], products[0][\"Product Name\"])\n add_product_to_cart(driver, products[0][\"Size\"], products[0][\"Color\"], products[0][\"Quantity\"])\n checkout_from_order_summary(driver)\n set_address(driver, address[\"Billing Address\"], address[\"Country\"], address[\"City\"], address[\"Zip\"])\n checkout_order_to_pay(driver, payment_info[\"Payment Type\"])\n pay_order(driver, payment_info[\"Card ID\"], payment_info[\"Expired Date\"], payment_info[\"CVC\"])\n verify_message(driver, \"Order was successful\")",
"def manageorder2(request, product_slug, template_name=\"merchant/manageorder2.html\"):\n postdata = request.POST.copy()\n cart_id = postdata['cart_id']\n customer_jid = postdata['xmpp_jid']\n cart_items2_2_accept = cart.merchant_cart_items2_2_accept(request, \"1\", cart_id)\n product_cache_key = request.path\n # try to get product from cache\n p = cache.get(product_cache_key)\n # if a cache miss, fall back on db query\n if not p:\n p = get_object_or_404(Product.active, slug=product_slug)\n # store item in cache for next time\n cache.set(product_cache_key, p, CACHE_TIMEOUT)\n categories = p.categories.filter(is_active=True)\n page_title = p.name\n meta_keywords = p.meta_keywords\n meta_description = p.meta_description\n venue_id = categories[0].venue.id\n # evaluate the HTTP method, change as needed\n #create the unbound form. Notice the request as a keyword argument\n form = ProductAddToCartForm(request=request, label_suffix=':')\n # assign the hidden input the product slug\n form.fields['product_slug'].widget.attrs['value'] = product_slug\n # set test cookie to make sure cookies are enabled\n request.session.set_test_cookie()\n stats.log_product_view(request, p)\n return render_to_response(template_name, locals(), context_instance=RequestContext(request))",
"def openproducts(self):\n\n print \"Open products\"\n self.combo_product_list.setEnabled(True)\n frame=self.combo_area_list.currentText()\n self.combo_product_list.clear()\n self.combo_dataset_list.clear()\n self.combo_variable_list.clear()\n print str(frame)\n list_glo=[]\n if str(frame) == \"GLOBAL\":\n for key in self.dict_prod.keys():\n if str(frame) in key :\n list_glo.append(str(key))\n ind=0\n #print \"Frame %s \" %(frame)\n for key in self.dict_prod.keys():\n if str(frame) == \"BAL\":\n frame1=\"_BAL_\"\n frame2=\"-BAL-\"\n if frame1 in key or frame2 in key :\n self.combo_product_list.addItem(str(key))\n elif str(frame) == \"NWS\":\n frame1=\"NORTHWESTSHELF_\"\n frame2=\"NWS\"\n if frame1 in key or frame2 in key :\n self.combo_product_list.addItem(str(key))\n elif str(frame) == \"GLOBAL\":\n if str(frame) in key :\n if ind == 0 :\n self.combo_product_list.addItem(list_glo[5])\n elif ind == 5 : \n self.combo_product_list.addItem(list_glo[0])\n else : \n self.combo_product_list.addItem(list_glo[ind])\n ind+=1\n else :\n if str(frame) in key :\n self.combo_product_list.addItem(str(key))\n self.combo_dataset_list.setEnabled(True)",
"def choice_product(self):\n self.first_number = 0\n self.leave_choice_product = 1\n while self.leave_choice_product:\n print(fr.FR[11])\n self.display_product(self.products)\n self.input_product = input(fr.FR[12])\n self.choice_product_input()",
"def products(request):\n\n return render(request, \"core/products.html\", {\n \"products\": Product.objects.all()\n })",
"def index(self, user):\n\n cart_products = CartProduct.index(user)\n CartProductsView.index(cart_products)",
"def show_products():\n\n print \"These are the products in sale\"\n for key, value in ADD_PRODUCTS.iteritems():\n print \"%s: Q%.2f\" % (key, value)"
] | [
"0.73180735",
"0.72016907",
"0.6804957",
"0.6651566",
"0.64211893",
"0.6275277",
"0.6244939",
"0.6185193",
"0.6105577",
"0.59807813",
"0.5963281",
"0.5960109",
"0.59552896",
"0.5910854",
"0.5890219",
"0.58691454",
"0.58535814",
"0.58395153",
"0.5799699",
"0.57818246",
"0.5745944",
"0.57265186",
"0.5720868",
"0.57194793",
"0.56665546",
"0.56649864",
"0.5664848",
"0.56303513",
"0.55735964",
"0.55340075"
] | 0.73403376 | 0 |
Display the status of the sent orders. | def __order_status(self):
log.debug("Displaying __order_status")
# Find the latest orders
orders = self.session.query(db.Order) \
.filter(db.Order.user == self.user) \
.order_by(db.Order.creation_date.desc()) \
.limit(20) \
.all()
# Ensure there is at least one order to display
if len(orders) == 0:
self.bot.send_message(self.chat.id, self.loc.get("error_no_orders"))
# Display the order status to the user
for order in orders:
self.bot.send_message(self.chat.id, order.text(w=self, session=self.session, user=True))
# TODO: maybe add a page displayer instead of showing the latest 5 orders | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_orders(self):\n\n data = cur.execute(\"\"\"SELECT * FROM orders\"\"\").fetchall()\n print(tabulate(data, headers=[\"Order ID\", \"Status\", \"Customer\", \"Address\", \"Delivery Method\"]))",
"def order_update_status():\n result = order_obj.order_update_status(request.forms) \n return result",
"def show_orders():\n return 'hehe'",
"def _printOrderStatus(self, targetorders):\n result = list(self.getList(self.root))\n open_order = filter(lambda y: (y[1] % 2) != 0, result)\n close_order = filter(lambda y: ((y[1] % 2) == 0 and y[1] != 0), result)\n open = list(open_order)\n close = list(close_order)\n close_order_count = 0\n for x in close:\n result = x[1] // 2\n close_order_count += result\n open_order_count = 0\n for x in open:\n result = x[1] + 1 // 2\n open_order_count += result\n balance = targetorders - (open_order_count + close_order_count)\n print(f'Open Orders: {open_order_count}')\n print(f'Closed Orders: {close_order_count}')\n print(f'Yet to be fulfilled: {balance}')\n print('------------------------------------')",
"def export_order_status_button(cls, store_views):\n pass",
"def display_status(self):\n time = float2str(self.scheduler.time, '10.2f')\n tx = float2str(self.tx_total, '10g')\n rx = float2str(self.rx_total, '10g')\n dup = float2str(self.dup_total, '10g')\n uniq_total = float2str(self.uniq_total, '10g')\n delivered_total = float2str(self.delivered_total, '10g')\n uniq_delivered_total = float2str(self.uniq_delivered_total, '10g')\n print(\n 'define status_l text Time:{},____TX:{},____RX:{},____DUP:{},____Delivered:{}__/__{},____Arrived:{} 14 white 0.5 0.05'\n .format(time, tx, rx, dup, uniq_delivered_total, uniq_total,\n delivered_total))",
"async def get_order_status(self, symbol, order_id, client_order_id):\n params = {\n \"symbol\": symbol,\n \"orderId\": str(order_id),\n \"origClientOrderId\": client_order_id,\n \"timestamp\": tools.get_cur_timestamp_ms()\n }\n success, error = await self.request(\"GET\", \"/api/v3/order\", params=params, auth=True)\n return success, error",
"def status(self, msg):\n oscid = self.app.global_osc_id()\n print(\"STATUS : /Llia/%s : %s\" % (oscid, msg))",
"async def get_order_status(self, symbol, order_id, client_order_id):\n uri = \"/fapi/v1/order\"\n params = {\n \"symbol\": symbol,\n \"orderId\": str(order_id),\n \"origClientOrderId\": client_order_id,\n \"timestamp\": tools.get_cur_timestamp_ms()\n }\n success, error = await self.request(\"GET\", uri, params=params, auth=True)\n return success, error",
"async def status(self, context):\n await self.send_message(context, await self.status_msg_packed(context))",
"async def update_order_status():\n symbol = App.config[\"symbol\"]\n\n # Get currently active order and id (if any)\n order = App.order\n order_id = order.get(\"orderId\", 0) if order else 0\n if not order_id:\n log.error(f\"Wrong state or use: check order status cannot find the order id.\")\n return None\n\n # -----\n # Retrieve order from the server\n try:\n new_order = App.client.get_order(symbol=symbol, orderId=order_id)\n except Exception as e:\n log.error(f\"Binance exception in 'get_order' {e}\")\n return\n\n # Impose and overwrite the new order information\n if new_order:\n order.update(new_order)\n else:\n return None\n\n # Now order[\"status\"] contains the latest status of the order\n return order[\"status\"]",
"def printOrders(self, event):\n \n pass",
"def format_status(self) -> str:\n if not self.ready_to_trade:\n return \"Market connectors are not ready.\"\n lines = []\n\n balance_df = self.get_balance_df()\n lines.extend([\"\", \" Balances:\"] + [\" \" + line for line in balance_df.to_string(index=False).split(\"\\n\")])\n\n exchanges_df = self.exchanges_df()\n lines.extend([\"\", \" Exchanges:\"] + [\" \" + line for line in exchanges_df.to_string(index=False).split(\"\\n\")])\n\n try:\n orders_df = self.active_orders_df()\n lines.extend([\"\", \" Active Orders:\"] + [\" \" + line for line in orders_df.to_string(index=False).split(\"\\n\")])\n except ValueError:\n lines.extend([\"\", \" No active maker orders.\"])\n\n return \"\\n\".join(lines)",
"def print_quotation(self):\n self.filtered(lambda s: s.state == 'draft').write({'state': 'sent'})\n return self.env['report'].get_action(self, 'ferrua_report.sale_order')",
"def statuses(self):\n big = BigCommerceAPI()\n response = big.get('orderstatuses')\n return response.text",
"def _print_status(self):",
"async def get_order_status(self, order_no):\n uri = \"/v3/spot/order\"\n params = {\n \"order_id\": order_no\n }\n success, error = await self.request(\"GET\", uri, params, auth=True)\n return success, error",
"async def getstatuses(self, ctx):\n final_list = \"\"\n statuses = await ex.get_bot_statuses()\n if statuses is not None:\n for status in await ex.get_bot_statuses():\n final_list += f\"{status[0]}\\n\"\n else:\n final_list = \"None\"\n embed = discord.Embed(title=\"Statuses\", description=final_list)\n await ctx.send(embed=embed)",
"def ConfirmedTradeStatus():\n return 'FO Confirmed'",
"def triggerPrintOrderStatus(self, prompt):\n if prompt is not None:\n split_prompt = prompt.split(':')\n truck_id = int(split_prompt[1].lstrip())\n print(f'The following status of {truck_id} orders:')\n self._printOrderStatus(truck_id)",
"def print_status(self):\r\n\t\tif VERBOSE:\r\n\r\n\t\t\tprint( 'Player : ')\r\n\t\t\tfor h in self.hands:\r\n\t\t\t\tprint('\\t' + str(h))\r\n\t\t\tprint( 'Dealer:\\n\\t' + str(self.dealer))\r\n\t\t\tprint( '-----------------------')",
"def show_status():\n\n pass",
"def present_status(self):\n output = ''\n if self.stats['hand']:\n output += 'Ready: \\n'\n for card in sorted(self.stats['hand'], key=itemgetter('name')):\n output += card_format(card) + '\\n'\n output += '\\n'\n if self.stats['active']:\n output += 'Active: \\n'\n for card in self.stats['active']:\n output += card_format(card) + '\\n'\n if self.stats['discard']:\n output += '\\nSpent: \\n'\n for card in self.stats['discard']:\n output += card_format(card) + '\\n'\n output += '\\n'\n output += 'Spells: \\n'\n for power in self.stats['powers']:\n output += '%s x %d\\n' % (power, self.stats['powers'][power])\n if self.stats['opponent']:\n output += '\\nCurrent Activity:\\n'\n output += '%s' % (card_format(self.stats['opponent']))\n header_print('Status')\n print(output)",
"def _print_send_status(event_data):\n message_count = (int(event_data[\"name\"]) - 1000) + 1\n\n if message_count % 5 == 0:\n print(f\"Sent {message_count} messages.\", end=\"\\r\")",
"async def status(self, msg, *args):\n content = self.get_status()\n await msg.channel.send(**{\n 'content': content,\n 'reference': msg.to_reference(),\n 'mention_author': True,\n })",
"def orders(request):\n return render(request, 'orders/orderList.html')",
"def export_order_status(self, store_views=None):\n if store_views is None:\n store_views = self.search([])\n\n for store_view in store_views:\n store_view.export_order_status_for_store_view()",
"def order_update_print():\n result = order_obj.order_update_print(request.forms) \n return result",
"def process_order_queue(self, order_ids=None):\n # find ids if not given, and only keep the not sent orders\n if not order_ids:\n orders = self.search([('state', 'not in', ['sent'])])\n else:\n orders = self.browse(order_ids).filtered(lambda r: not r.state in ['sent'])\n\n # prepare all the orders\n orders._prepare_printing()\n # deliver only the 'ready' ones\n orders.filtered(lambda r: r.state == 'ready')._deliver_printing()\n # validate the sending, only on the correctly sent\n orders.filtered(lambda r: r.state == 'sent')._validate_printing()\n\n # error control : built the list of user to notify\n # create a dict 'user_to_notify' where\n # key = user_id\n # value = list of tuple (order_id, error_message) for all order not sent correctly\n user_to_notify = {}\n for record in orders.filtered(lambda record: record.state == 'error'):\n user_to_notify.setdefault(record.user_id.id, list()).append((record.id, record.error_message))\n\n # send a message to the author of the failed print orders\n template = self.env['ir.model.data'].xmlid_to_object('print.print_user_notify_failed_email_template')\n for user_id in user_to_notify.keys():\n template.with_context(print_errors=user_to_notify[user_id]).send_mail(user_id, force_send=True)",
"def status(self, **options):\n pass"
] | [
"0.6624454",
"0.6607733",
"0.6512122",
"0.6492815",
"0.6353827",
"0.633038",
"0.61956954",
"0.6176387",
"0.6156388",
"0.6110003",
"0.6095435",
"0.6048326",
"0.6021349",
"0.601456",
"0.6001526",
"0.5956618",
"0.59367436",
"0.5935654",
"0.5873689",
"0.58426803",
"0.58011866",
"0.5798615",
"0.5780065",
"0.5763038",
"0.57559454",
"0.5730093",
"0.56608117",
"0.5653922",
"0.56522477",
"0.56498706"
] | 0.8103798 | 0 |
Send information about the bot. | def __bot_info(self):
log.debug("Displaying __bot_info")
self.bot.send_message(self.chat.id, self.loc.get("bot_info")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def info(ctx):\n embed = discord.Embed(title=\"Zane Bot\", description=\"All hail the hypnotoad!\", color=0x0091C5)\n\n # give info about you here\n embed.add_field(name=\"Author\", value=\"Zanexius\")\n\n # Shows the number of servers the bot is member of.\n embed.add_field(name=\"Server count\", value=f\"{len(bot.guilds)}\")\n\n # give users a link to invite thsi bot to their server\n embed.add_field(name=\"Invite\", value=\"[Invite link](<insert your OAuth invitation link here>)\")\n\n await ctx.send(embed=embed)",
"async def botinfo(self, context: Context) -> None:\n embed = discord.Embed(\n description=\"Used [Krypton's](https://krypton.ninja) template\",\n color=0x9C84EF,\n )\n embed.set_author(name=\"Bot Information\")\n embed.add_field(name=\"Owner:\", value=\"Krypton#7331\", inline=True)\n embed.add_field(\n name=\"Python Version:\", value=f\"{platform.python_version()}\", inline=True\n )\n embed.add_field(\n name=\"Prefix:\",\n value=f\"/ (Slash Commands) or {self.bot.config['prefix']} for normal commands\",\n inline=False,\n )\n embed.set_footer(text=f\"Requested by {context.author}\")\n await context.send(embed=embed)",
"async def info(self, ctx):\n\t\tembed = discord.Embed(\n\t\t\tdescription=\"Created By Seperoph#1399 and AkaBaka#4654\",\n\t\t\tcolor=config[\"success\"]\n\t\t)\n\t\tembed.set_author(\n\t\t\tname=\"Bot Information\"\n\t\t)\n\t\tembed.add_field(\n\t\t\tname=\"Head Programmers:\",\n\t\t\tvalue=\"Seperoph#1399 and AkaBaka#4654\",\n\t\t\tinline=True\n\t\t)\n\t\tembed.add_field(\n\t\t\tname=\"Python Version:\",\n\t\t\tvalue=f\"{platform.python_version()}\",\n\t\t\tinline=True\n\t\t)\n\t\tawait ctx.respond(embed=embed)",
"async def info(self, context):\n await context.send('creador: [email protected]\\ncolabs:\\n emi: https://twitter.com/emilianosce/ o https://www.instagram.com/emilianosce/ \\n garza: https://twitter.com/Matias_Garcia00 o https://www.twitch.tv/garzangb')",
"async def info_bot(self, message):\n pythonVersion = platform.python_version()\n dpyVersion = discord.__version__\n serverCount = len(self.bot.guilds)\n memberCount = len(set(self.bot.get_all_members()))\n mem1 = self.bot.get_user(854230635425693756)\n embed = discord.Embed(\n title=f\"{mem1.name} Stats \",\n description=f\"{self.bot.user.name} Bot is a MultiPrupose Bot Customised for FRNz COmmunity. Made By <@448740493468106753>\",\n colour=discord.Color.blurple(),\n timestamp=datetime.utcnow(), )\n\n embed.add_field(name=\"Bot Version:\", value=self.bot.version)\n embed.add_field(name=\"Python Version:\", value=pythonVersion)\n embed.add_field(name=\"Discord.Py Version\", value=dpyVersion)\n embed.add_field(name=\"Total Guilds:\", value=serverCount)\n embed.add_field(name=\"Total Users:\", value=memberCount)\n embed.add_field(name=\"Bot Made By:\", value=\"<@448740493468106753>\")\n\n embed.set_footer(text=f\"{message.guild.name} | {self.bot.user.name}\")\n embed.set_author(name=self.bot.user.name,\n icon_url=self.bot.user.avatar.url)\n embed.set_thumbnail(url=self.bot.user.avatar.url)\n await message.channel.send(embed=embed)",
"async def botinfo(ctx):\n colour = ''.join([random.choice('0123456789ABCDEF') for x in range(6)])\n colour = int(colour, 16)\n embed = discord.Embed(colour = discord.Colour(value = colour), timestamp = datetime.datetime.utcnow())\n embed.add_field(name='Bot Info', value = \"I'm made with the library Discord.py Async.\"\n \" I'm developed by Shutdown.py#2406. \"\n \"If you need any help with me, Join my [devs' server](https://discord.gg/X4CJdEM).\"\n \"Send feedback using the feedback command\")\n embed.add_field(name='Total Commands', value=(len(bot.commands)))\n embed.add_field(name = 'Invite Me!', value = '[Invite](https://discordbots.org/bot/399115688792424448)')\n embed.set_footer(text= \"{} | Requested by: {} at\".format(version, ctx.message.author))\n await bot.say(embed = embed)",
"async def botinfo(self, ctx: commands.Context) -> None:\n\n embed = CleanEmbed(\n author_text=\"About Freddy\",\n description=f\"Freddy is a powerful multi-purpose bot, developed and designed with ease of use in mind. \"\n f\"Created {(datetime.utcnow() - self.bot.user.created_at).days} days ago, he has been providing \"\n f\"value to many guilds for a long time.\",\n thumbnail_url=self.bot.user.avatar_url,\n fields=[\n {\"name\": \"Commands\", \"value\": f\"{len(self.bot.commands)} public commands\", \"inline\": True},\n {\"name\": \"Maintainment\", \"value\": \"Developed and designed by Harry\", \"inline\": True},\n {\"name\": \"Invite Freddy\", \"value\": \"Invite here\", \"inline\": True},\n {\"name\": \"Timeline\", \"value\":\n \"~~-~~**1**~~------~~**2**~~-------~~**3**~~------------------~~**4**~~-----------~~**5**~~-~~ \\n\\n\"\n \"**1** - \" + self.bot.user.created_at.strftime(\"%B %Y\") + \" - Freddy was created \\n\"\n \"**2** - November 2019 - Development was paused \\n\"\n \"**3** - January 2020 - Development resumed and Freddy grew rapidly \\n\"\n \"**4** - December 2020 - Freddy's development stopped \\n\"\n \"**5** - May 2021 - In process of re-designing and bot verification\"\n }\n ]\n )\n\n await ctx.send(embed=embed)",
"async def _info(self, ctx: Context):\n\n embed = discord.Embed(colour=await ctx.embed_colour())\n\n perm_int = discord.Permissions(268494928)\n\n data = await self.bot.application_info()\n invite_url = discord.utils.oauth_url(data.id, permissions=perm_int)\n\n embed.description = (\n \"TvM Assistant is a Discord bot with utility commands to make hosting TvMs easier.\"\n \"\\n\\nSome of the bot features include:\"\n \"\\n\\n- Setup roles and channel creation\"\n \"\\n- Management of sign-ups, sign-outs, spectators and replacements\"\n \"\\n- In-built logging to detect and ignore private channels\"\n \"\\n- Quick creation of player, mafia and spectator chats\"\n \"\\n- Vote counts and time since day/night started\"\n )\n\n links = (\n f\"\\n- [Invite to your server]({invite_url})\"\n f\"\\n- [Quickstart]({QUICKSTART})\"\n f\"\\n- [Commands Reference]({COMMANDS_REFERENCE})\"\n f\"\\n- [Source Code]({SOURCE_CODE})\"\n )\n\n embed.add_field(name=\"\\u200b\\nQuick Links\", value=links)\n embed.set_author(name=f\"About {ctx.me.name}\", icon_url=ctx.me.avatar_url)\n\n await ctx.send(embed=embed)",
"async def info(self, ctx):\n\n uptime = func.time_(self.bot.launch_time)\n users = sum(1 for _ in self.bot.get_all_members())\n channels = sum(1 for _ in self.bot.get_all_channels())\n\n author = self.bot.get_user(299879858572492802)\n\n invite = 'https://discordapp.com/oauth2/authorize?client_id=347205176903335937&scope=bot&permissions=470150359'\n about = ('Infamous is a actively developed bot that gets updated daily.'\n f' It is written with passion by {author} using the Rewrite branch of the discord.py library.')\n\n links = (f'**[[Invite Bot]]({invite})** \\n'\n '**[[Fame Discord]](https://discord.gg/NY2MSA3)** \\n'\n '**[[Discord.py]](https://github.com/Rapptz/discord.py/tree/rewrite)** \\n'\n '**[[Support]](https://discord.gg/JyJTh4H)**')\n\n # From Modelmat\n cpu_usage = self.process.cpu_percent() / psutil.cpu_count()\n ram_usage = self.process.memory_full_info().uss / 1024 ** 2\n\n embed = discord.Embed(color=self.bot.embed_color)\n embed.set_author(name=self.bot.user.name, icon_url=self.bot.user.avatar_url)\n embed.description = 'A multi-purpose bot with image manipulation, wiki pages and it\\'s own rpg; originally a ' \\\n 'community bot for ★ Fame ★'\n embed.set_thumbnail(\n url=self.bot.user.avatar_url)\n\n embed.add_field(name='About', value=about, inline=False)\n\n embed.add_field(name='Statistics 📈',\n value=(f'**{len(self.bot.guilds)} guilds.**\\n'\n f'**{channels} channels.**\\n'\n f'**{users} users.** \\n'\n f'**{self.bot.lines} lines**'), inline=True)\n\n embed.add_field(name='Uptime ⏰', value=(f'**{uptime[0]} days.** \\n'\n f'**{uptime[1]} hours.** \\n'\n f'**{uptime[2]} minutes.** \\n'\n f'**{uptime[3]} seconds.**'), inline=True)\n\n embed.add_field(name='Developer 🕵', value=author)\n embed.add_field(name='Resources 💻', value='`CPU:` {:.2f}% \\n`MEM:` {:.2f}'.format(cpu_usage, ram_usage))\n embed.add_field(name='Links 🔗', value=links, inline=True)\n\n await ctx.send(embed=embed)",
"async def info(self, ctx):\n self.logger.info(misolog.format_log(ctx, f\"\"))\n appinfo = await self.client.application_info()\n membercount = sum(1 for x in self.client.get_all_members())\n info_embed = discord.Embed(title=f\"Miso Bot | version {main.version}\",\n description=f\"Created by {appinfo.owner.mention}\\n\\n\"\n f\"Use `{self.client.command_prefix}help` to get the list of commands, \"\n f\"or visit the documention website for more help.\"\n f\"\\n\\nCurrently active in **{len(self.client.guilds)}** \"\n f\"servers totaling **{membercount}** unique users\",\n colour=discord.Colour.red())\n\n # info_embed.set_footer(text=f'version 2.0')\n info_embed.set_thumbnail(url=self.client.user.avatar_url)\n info_embed.add_field(name='Github', value='https://github.com/joinemm/miso-bot', inline=False)\n info_embed.add_field(name='Documentation', value=\"http://joinemm.me/misobot\", inline=False)\n info_embed.add_field(name='Patreon', value=\"https://www.patreon.com/joinemm\", inline=False)\n await ctx.send(embed=info_embed)",
"async def info(self):\n # [p]info\n\n await self.bot.say(strings.info.format(\n CacheAPI.get(key='dwarf_repository'),\n CacheAPI.get(key='dwarf_invite_link')))",
"async def botinfo(ctx, bot: typing.Union[discord.Member, discord.User]):\n if not bot.bot:\n return await r(ctx, 'Not a bot.')\n\n data = await make_request(\"https://www.motiondevelopment.top/api/v1.2/bots/\", bot.id)\n \n e = discord.Embed(\n title=f'Available bot info for {bot}',\n color=0xfecdea,\n description=f\"**Short Bot Description:** (do `uwu desc [bot]` for big description)\\n\\n*{data['Small_desc']}*\"\n )\n\n if data[\"bot_status\"] == \"online\":\n status = '<:online:805576670353948702> Online'\n elif data[\"bot_status\"] == \"idle\":\n status = '<:idle:805855470778056725> Idle'\n elif data[\"bot_status\"] == \"offline\":\n status = '<:offline:805576352450871346> Offline'\n elif data[\"bot_status\"] == \"dnd\":\n status = '<:dnd:819964146317393990> Do Not Disturb'\n\n listed_at = datetime.datetime.strptime(data[\"list_date\"], '%Y-%m-%d')\n\n e.add_field(\n name='Owner:', value=f'**{data[\"owner_name\"]}**\\n({data[\"owner_id\"]})', inline=False)\n e.add_field(name='Tags:', value=', '.join(data[\"tops\"]))\n e.add_field(name='Vanity URL:', value=data[\"vanity_url\"]\n if data[\"vanity_url\"] != '' else 'No vanity URL set.', inline=False)\n e.add_field(name='Bot Status:', value=status)\n e.add_field(name='Invites:',\n value=f'[Bot Invite]({data[\"invite\"]})\\n[Bot Support Server](https://discord.gg/{data[\"discord\"]})', inline=False)\n e.add_field(name='Other Bot Info:', value=f'''\n **Prefix:** `{data[\"prefix\"]}`\n **Site:** {data[\"site\"] if data[\"site\"] != '' else \"No sites.\"}\n **Library:** {data[\"lib\"]}\n **Listed at:** {listed_at}\n **Server Count:** {data[\"servers\"] if data[\"servers\"] != 'None' else '*Not set up!*'}''', inline=False)\n e.set_thumbnail(url=f'https://cdn.discordapp.com/avatars/{data[\"id\"]}/{data[\"avatar\"]}')\n await em(ctx, embed=e)",
"def details(update, context):\n context.bot.send_message(chat_id=update.effective_chat.id, text=str(owner))",
"async def info(self, ctx):\n if ctx.guild is not None:\n await ctx.reply(\"This command can only be used in DMs, because of privacy reasons.\")\n raise commands.CommandError(\"Invoker not in DMs.\")\n\n if not is_linked(ctx.author.id):\n await ctx.reply(f\"You don't have a Spotify account linked. Please link one using \"\n f\"`{self.bot_config['prefix']}link`.\")\n raise commands.CommandError(\"User has no spotify account linked.\")\n\n sp = init_spotify(ctx.author.id)\n result = sp.me()\n msg_embed = Embed()\n msg_embed.title = \"Linked Spotify account\"\n msg_embed.url = result['external_urls'].get('spotify', None)\n if len(result['images']) > 0:\n msg_embed.set_image(url=result['images'][0]['url'])\n msg_embed.add_field(name=\"Display name\", value=result['display_name'])\n msg_embed.add_field(name=\"Subscription type\", value=result.get('product', 'free'))\n if result.get('product', None) != \"premium\":\n msg_embed.add_field(name=\"Warning!\",\n value=\"Only accounts with Spotify Premium can use this bot!\",\n inline=False)\n await ctx.reply(embed=msg_embed)",
"async def botinfo(self, ctx):\n\n dev = await self.bot.fetch_user(170506717140877312)\n\n start = perf_counter()\n status_msg = await ctx.send('Beregner ping...')\n end = perf_counter()\n ping = int((end - start) * 1000)\n\n now = time()\n diff = int(now - self.bot.uptime)\n days, remainder = divmod(diff, 24 * 60 * 60)\n hours, remainder = divmod(remainder, 60 * 60)\n minutes, seconds = divmod(remainder, 60)\n\n process = Process(getpid())\n memory_usage = round(process.memory_info().rss / 1000000, 1)\n cpu_percent = process.cpu_percent()\n\n total_members = []\n online_members = []\n idle_members = []\n dnd_members = []\n offline_members = []\n for guild in self.bot.guilds:\n for member in guild.members:\n if member.id in total_members:\n continue\n total_members.append(member.id)\n if str(member.status) == 'online':\n online_members.append(member.id)\n elif str(member.status) == 'idle':\n idle_members.append(member.id)\n elif str(member.status) == 'dnd':\n dnd_members.append(member.id)\n elif str(member.status) == 'offline':\n offline_members.append(member.id)\n\n embed = discord.Embed(color=ctx.me.color, url=self.bot.misc['website'])\n embed.set_author(name=dev.name, icon_url=dev.avatar_url)\n embed.set_thumbnail(url=self.bot.user.avatar_url)\n embed.add_field(name='Dev', value=f'{dev.mention}\\n{dev.name}#{dev.discriminator}')\n embed.add_field(name='Oppetid', value=f'{days}d {hours}t {minutes}m {seconds}s')\n embed.add_field(name='Ping', value=f'Ekte ping: {ping} ms\\nWebsocket ping: {int(self.bot.latency * 1000)} ms')\n embed.add_field(name='Servere', value=len(self.bot.guilds))\n embed.add_field(name='Discord.py', value=discord.__version__)\n embed.add_field(name='Python', value=platform.python_version())\n embed.add_field(name='Ressursbruk', value=f'RAM: {memory_usage} MB\\nCPU: {cpu_percent}%')\n embed.add_field(name='Maskin', value=f'{platform.system()} {platform.release()}')\n embed.add_field(name=f'Brukere ({len(total_members)})',\n value=f'{self.bot.emoji[\"online\"]}{len(online_members)} ' +\n f'{self.bot.emoji[\"idle\"]}{len(idle_members)} ' +\n f'{self.bot.emoji[\"dnd\"]}{len(dnd_members)} ' +\n f'{self.bot.emoji[\"offline\"]}{len(offline_members)}')\n embed.add_field(name='Lenker', value='[Inviter](https://discordapp.com/oauth2/authorize?client_' +\n f'id={self.bot.user.id}&permissions=388174&scope=bot) ' +\n f'| [Nettside]({self.bot.misc[\"website\"]}) ' +\n f'| [Kildekode]({self.bot.misc[\"source_code\"]})')\n await Defaults.set_footer(ctx, embed)\n await status_msg.edit(embed=embed, content=None)",
"async def info(self, ctx):\n\n level = await self.get_player_level(ctx.author)\n embed = discord.Embed()\n embed.colour = discord.Colour.blurple()\n embed.set_author(name=str(ctx.author), icon_url=ctx.author.avatar_url)\n\n embed.title = f'Your current level : {level}'\n\n embed.add_field(name='Question', value=f'{self.enigmas[level][\"question\"]}')\n\n embed.set_footer(text='I love Ducks')\n\n await ctx.send(embed=embed)",
"async def about(self, ctx: Context):\n embed = discord.Embed(\n colour=ctx.me.colour,\n description=f'I am {self.bot.user}, a bot made by {self.bot.owner}. My prefix is `{self.bot.prefix}`.'\n ).set_author(name=f'About {self.bot.user.name}:', icon_url=self.bot.user.avatar_url)\n\n await ctx.send(embed=embed)",
"def handle(bot, update):\n print(update.message.text)\n bot.send_message(chat_id=update.message.chat_id,\n text='Hey! I\\'m Meditech Bot')",
"async def info(ctx, message):\n if ctx.args.command == None:\n embed = discord.Embed()\n embed.add_field(name=\"Profile\", value=ctx.profile.name)\n embed.add_field(name=\"Mode\", value=ctx.profile.mode)\n embed.set_author(name=ctx.user.name, icon_url=ctx.user.avatar_url)\n await message.channel.send(embed=embed)\n else:\n embed = discord.Embed(\n title=\"{0.profile.prefix}{0.args.command.name} {0.args.command.usage}\".format(ctx),\n description=ctx.args.command.help,\n url=\"https://github.com/Synixe/Bot/blame/master/{0}#L{1.start}L{1.end}\".format(ctx.args.command.file.replace(os.getcwd(), \"\"), ctx.args.command)\n )\n embed.set_footer(text=ctx.args.command.extension.fullname + \".\" + ctx.args.command.name)\n await message.channel.send(embed=embed)",
"def send_game_info( game, client_key, from_name, send_message_func ): # TODO: change game to lobby?\n\n game_info = message.Message( client_key, 'd' )\n new_message = game_info.new_message(from_name, game.game.game_name, game.get_player_names(),\n game.game.min_players, game.game.max_players, game.get_time_till_start())\n game_info.message = new_message\n game_info.to_clients = [ client_key ]\n\n send_message_func( game_info )",
"async def roominfo(self, ctx: Message):\n\t\tawait self.send(\n\t\t f\"Name: {self.room.name} • Description: {self.room.description} • ID: {self.room.id} • Member Count: {self.room.count} • Created at: {self.room.created_at} • Is Private?: {self.room.is_private}\"\n\t\t)",
"async def info(self, ctx):\r\n openfile = open(\"info.txt\", \"r\")\r\n embed = discord.Embed(title='Aristobot', description='This is a bot made by Aristoza that uses the TrueSkill '\r\n 'python package (http://trueskill.org/) which is based on '\r\n 'the '\r\n 'TrueSkill rating system developed by Microsoft.',\r\n color=33023)\r\n embed.add_field(name='How it works', value=openfile.read(), inline=False)\r\n await ctx.send(embed=embed)",
"async def serverinfo_command(self, ctx):\n owner = str(ctx.guild.owner.mention)\n id = str(ctx.guild.id)\n region = str(ctx.guild.region)\n memberCount = str(ctx.guild.member_count)\n textChannels = len(ctx.guild.text_channels)\n voiceChannels = len(ctx.guild.voice_channels)\n roles = len(ctx.guild.roles)\n guildCreatedate = ctx.guild.created_at.strftime(\"%a, %#d %B %Y, %I:%M %p\")\n\n embed = Embed(\n title=f\"Info of {ctx.guild.name} Server\",\n color=Color.blurple(),\n timestamp=datetime.utcnow(),\n )\n embed.set_footer(text=f\"Requested by {ctx.author.name}\")\n embed.set_thumbnail(url=ctx.guild.icon_url)\n fields = [\n (\"Server ID\", id, True),\n (\"Server Region\", region.capitalize(), True),\n (\"Owner\", owner, True),\n (\"Member Count\", memberCount, True),\n (\"Text Channels\", textChannels, True),\n (\"Voice Channels\", voiceChannels, True),\n (\"Role Count\", roles, True),\n (\"Created on\", guildCreatedate, True),\n ]\n for name, value, inline in fields:\n embed.add_field(name=name, value=value, inline=inline)\n await ctx.send(embed=embed)",
"def update_info(self):\n\n r = requests.get(self.url + 'getMe')\n if r.status_code == 200:\n response = json.loads(r.text)\n if response['ok']:\n bot_info = response['result']\n self.user_id = bot_info['id']\n self.first_name = bot_info['first_name']\n if 'last_name' in bot_info:\n self.last_name = bot_info['last_name']\n if 'username' in bot_info:\n self.username = bot_info['username']\n else:\n raise TelegramError('The result was not \"ok\"')\n else:\n raise TelegramError('Did not get a 200 response', r.status_code)",
"async def bot_info(self, id: int) -> dict:\n return await self._do_request(\"get\", botinfo_address, self._user_auth,\n params={\"id\": id})",
"def command_who(self, bot, update):\n\n messages = [\n 'Myles Braithwaite lives in Toronto where he runs a small '\n 'consluting company called [Monkey in your Soul]'\n '(https://monkeyinyoursoul.com/) (you should hire him because '\n \"he's awesome).\",\n 'You should follow him on [Twitter](https://twitter.com/mylesb) '\n 'or [Instagram](https://instagram.com/myles).',\n 'You can find his programming stuff on [GitHub]'\n '(https://github.com/myles) or [CodePen]'\n '(http://codepen.io/mylesb/).'\n ]\n\n self.send_messages(bot, update, messages)",
"async def _me_stats(self, ctx):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n await ctx.send(users.print_account(ctx.user_object, printequipment=False))",
"async def hello(self):\t# << This is the actual command, or input # << Info\r\n\r\n await self.bot.say(\"Hi there!\")\t# << This is the output\r",
"def info(self, msg, *args, **kwargs):\n pass",
"def send(self, text):\n log.msg('me %s' % (text))\n self.sendLine(text)"
] | [
"0.725934",
"0.7055274",
"0.7054091",
"0.7039354",
"0.6996456",
"0.6912311",
"0.68851507",
"0.6837405",
"0.68016917",
"0.67736316",
"0.671528",
"0.6668429",
"0.6624",
"0.65209925",
"0.64319694",
"0.6400704",
"0.63974255",
"0.6349702",
"0.63252884",
"0.62954056",
"0.62543094",
"0.62419116",
"0.6240868",
"0.62144905",
"0.6199781",
"0.61886436",
"0.61688155",
"0.61616373",
"0.6148365",
"0.6142715"
] | 0.8450503 | 0 |
Display the admin menu to select a product to edit. | def __products_menu(self):
log.debug("Displaying __products_menu")
# Get the products list from the db
products = self.session.query(db.Product).filter_by(deleted=False).all()
# Create a list of product names
product_names = [product.name for product in products]
# Insert at the start of the list the add product option, the remove product option and the Cancel option
product_names.insert(0, self.loc.get("menu_all_cancel"))
product_names.insert(1, self.loc.get("menu_add_product"))
product_names.insert(2, self.loc.get("menu_delete_product"))
# Create a keyboard using the product names
keyboard = [[telegram.KeyboardButton(product_name)] for product_name in product_names]
# Send the previously created keyboard to the user (ensuring it can be clicked only 1 time)
self.bot.send_message(self.chat.id, self.loc.get("conversation_admin_select_product"),
reply_markup=telegram.ReplyKeyboardMarkup(keyboard, one_time_keyboard=True))
# Wait for a reply from the user
selection = self.__wait_for_specific_message(product_names, cancellable=True)
# If the user has selected the Cancel option...
if isinstance(selection, CancelSignal):
# Exit the menu
return
# If the user has selected the Add Product option...
elif selection == self.loc.get("menu_add_product"):
# Open the add product menu
self.__edit_product_menu()
# If the user has selected the Remove Product option...
elif selection == self.loc.get("menu_delete_product"):
# Open the delete product menu
self.__delete_product_menu()
# If the user has selected a product
else:
# Find the selected product
product = self.session.query(db.Product).filter_by(name=selection, deleted=False).one()
# Open the edit menu for that specific product
self.__edit_product_menu(product=product) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __edit_product_menu(self, product: Optional[db.SwimPool] = None):\n log.debug(\"Displaying __edit_product_menu\")\n # Create an inline keyboard with a single skip button\n cancel = telegram.InlineKeyboardMarkup([[telegram.InlineKeyboardButton(self.loc.get(\"menu_skip\"),\n callback_data=\"cmd_cancel\")]])\n # Ask for the product name until a valid product name is specified\n while True:\n # Ask the question to the user\n self.bot.send_message(self.chat.id, self.loc.get(\"ask_product_name\"))\n # Display the current name if you're editing an existing product\n if product:\n self.bot.send_message(self.chat.id, self.loc.get(\"edit_current_value\", value=escape(product.name)),\n reply_markup=cancel)\n # Wait for an answer\n name = self.__wait_for_regex(r\"(.*)\", cancellable=bool(product))\n # Ensure a product with that name doesn't already exist\n if (product and isinstance(name, CancelSignal)) or \\\n self.session.query(db.Product).filter_by(name=name, deleted=False).one_or_none() in [None, product]:\n # Exit the loop\n break\n self.bot.send_message(self.chat.id, self.loc.get(\"error_duplicate_name\"))\n # Ask for the product description\n self.bot.send_message(self.chat.id, self.loc.get(\"ask_product_description\"))\n # Display the current description if you're editing an existing product\n if product:\n self.bot.send_message(self.chat.id,\n self.loc.get(\"edit_current_value\", value=escape(product.description)),\n reply_markup=cancel)\n # Wait for an answer\n description = self.__wait_for_regex(r\"(.*)\", cancellable=bool(product))\n # Ask for the product price\n self.bot.send_message(self.chat.id,\n self.loc.get(\"ask_product_price\"))\n # Display the current name if you're editing an existing product\n if product:\n self.bot.send_message(self.chat.id,\n self.loc.get(\"edit_current_value\",\n value=(str(self.Price(product.price))\n if product.price is not None else 'Non in vendita')),\n reply_markup=cancel)\n # Wait for an answer\n price = self.__wait_for_regex(r\"([0-9]+(?:[.,][0-9]{1,2})?|[Xx])\",\n cancellable=True)\n # If the price is skipped\n if isinstance(price, CancelSignal):\n pass\n elif price.lower() == \"x\":\n price = None\n else:\n price = self.Price(price)\n # Ask for the product image\n self.bot.send_message(self.chat.id, self.loc.get(\"ask_product_image\"), reply_markup=cancel)\n # Wait for an answer\n photo_list = self.__wait_for_photo(cancellable=True)\n # If a new product is being added...\n if not product:\n # Create the db record for the product\n # noinspection PyTypeChecker\n product = db.Product(name=name,\n description=description,\n price=int(price) if price is not None else None,\n deleted=False)\n # Add the record to the database\n self.session.add(product)\n # If a product is being edited...\n else:\n # Edit the record with the new values\n product.name = name if not isinstance(name, CancelSignal) else product.name\n product.description = description if not isinstance(description, CancelSignal) else product.description\n product.price = int(price) if not isinstance(price, CancelSignal) else product.price\n # If a photo has been sent...\n if isinstance(photo_list, list):\n # Find the largest photo id\n largest_photo = photo_list[0]\n for photo in photo_list[1:]:\n if photo.width > largest_photo.width:\n largest_photo = photo\n # Get the file object associated with the photo\n photo_file = self.bot.get_file(largest_photo.file_id)\n # Notify the user that the bot is downloading the image and might be inactive for a while\n 
self.bot.send_message(self.chat.id, self.loc.get(\"downloading_image\"))\n self.bot.send_chat_action(self.chat.id, action=\"upload_photo\")\n # Set the image for that product\n product.set_image(photo_file)\n # Commit the session changes\n self.session.commit()\n # Notify the user\n self.bot.send_message(self.chat.id, self.loc.get(\"success_product_edited\"))",
"def edit_product(request, pk):\n\n products = get_object_or_404(Product, pk=pk)\n if request.method == 'POST':\n form = ProductPostForm(request.POST, instance=products)\n if form.is_valid():\n product = form.save()\n return redirect(product_details, product.pk)\n else:\n form = ProductPostForm(instance=products)\n return render(request, 'editproduct.html', {'form': form})",
"def show_admin_edit_admins():\n return render_admin_page(\"admin-ea.html\")",
"def show_admin():\n return render_admin_page(\"admin.html\")",
"def edit_product(request, product_id):\n if not request.user.is_superuser:\n messages.error(request, 'Invalid Request: Only admin can edit products/services.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n if request.method == 'POST':\n form = ProductForm(request.POST, request.FILES, instance=product)\n if form.is_valid():\n form.save()\n messages.success(request, 'Update Successful!')\n return redirect(reverse('product_detail', args=[product.id]))\n else:\n messages.error(request, 'Update Failed. \\\n Please check that the details in the form are valid ')\n else:\n form = ProductForm(instance=product)\n messages.info(request, f'You are editing {product.name}')\n\n template = 'products/edit_product.html'\n context = {\n 'form': form,\n 'product': product,\n 'on_edit_product_page': True\n }\n\n return render(request, template, context)",
"def print_product_menu():\r\n print(\"\"\"\r\n Menu\r\n 1 - Display Product Price Inventory\r\n 2 - Add New Product\r\n 3 - Save Session\r\n 4 - Exit Session \r\n \"\"\")",
"def edit_product(request, product_id):\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n \n product = get_object_or_404(Product, pk=product_id)\n if request.method == 'POST':\n # This will get data from form and to update the product instance called above\n form = ProductForm(request.POST, request.FILES, instance=product)\n if form.is_valid():\n form.save()\n messages.success(request, 'Successfully updated product!')\n return redirect(reverse('product_detail', args=[product.id]))\n else:\n messages.error(request, 'Failed to update product. Please ensure the form is valid.')\n else:\n # populate the form with product instance\n form = ProductForm(instance=product)\n messages.info(request, f'You are editing {product.name}')\n\n template = 'products/edit_product.html'\n context = {\n 'form': form,\n 'product': product,\n }\n\n return render(request, template, context)",
"def show_admin_edit_users():\n return render_admin_page(\"admin-eu.html\")",
"def product_detail(request, product_id):\n\n product = get_object_or_404(Product, pk=product_id)\n options = None\n\n if 'option' in request.GET:\n options = request.GET['option']\n options = list(Option.objects.filter(name__in=options))\n\n context = {\n 'product': product,\n 'options': options,\n }\n\n return render(request, 'products/product_detail.html', context)",
"def open_products_page(catalog_menu):\n catalog_menu.open_products_page()",
"def admin_update_preview():\n return user_management_handler(\"show_admin\", \"\", False)",
"def edit_product(request, product_id):\n\n if not request.user.is_superuser:\n messages.error(request, 'This feature is for Admin only.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n if request.method == 'POST':\n form = ProductForm(request.POST, request.FILES, instance=product)\n if form.is_valid():\n form.save()\n messages.success(request, 'Successfully updated product.')\n return redirect(reverse('product_detail', args=[product.id]))\n else:\n messages.error(request,\n 'Failed to update, please ensure form is valid.')\n else:\n form = ProductForm(instance=product)\n messages.info(request, f'You are editing { product.name }')\n\n template = 'products/edit_product.html'\n context = {\n 'form': form,\n 'product': product,\n }\n\n return render(request, template, context)",
"def _onchange_product_id(self):\n if not self.product_id:\n return\n else :\n thisid = self.search([\n ('product_id', '=', self.product_id.id),\n\n ], order='id', limit=1)\n # return {\n # 'type': 'ir.actions.act_window',\n # 'res_model': 'rental.shopify_product',\n # 'views': [[False, 'form']],\n # 'res_id': thisid.id,\n # }\n\n\n self.update({\n 'is_Edit' : True,\n 'edit_id' : thisid.id,\n 'shopify_product_title': self.product_id.title,\n 'rental_pricing_ids' : thisid.rental_pricing_ids\n\n })",
"def edit_product(request, product_id):\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n\n if request.method == \"POST\":\n product_form = EditProductForm(request.POST, request.FILES,\n instance=product)\n if product_form.is_valid:\n product = product_form.save()\n messages.success(request, f'You have successfully updated \\\n product {product}.')\n return redirect('products')\n else:\n messages.error(request, 'Failed to update product. \\\n Please ensure the form is valid.')\n\n product_form = EditProductForm(instance=product)\n\n # Get all the product images to display on the edit form\n product_images = product.images.all()\n\n messages.info(request, f'You are editing product: \\\n {product}')\n\n template = 'auctionsmng/edit_product.html'\n\n context = {\n 'product_form': product_form,\n 'product': product,\n 'images': product_images,\n }\n return render(request, template, context)",
"def edit_product(request, product_id):\n if not request.user.is_superuser:\n messages.error(request, 'Access denied!\\\n Sorry, only site owners have this permission.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n if request.method == 'POST':\n form = ProductForm(request.POST, request.FILES, instance=product)\n if form.is_valid():\n form.save()\n messages.success(request, 'The product was successfully updated!')\n return redirect(reverse('product_detail', args=[product.id]))\n else:\n messages.error(request, 'Failed to update the product. Please\\\n ensure the form is valid.')\n\n else:\n form = ProductForm(instance=product)\n messages.info(request, f'You are editing {product.name}')\n\n template = 'products/edit_product.html'\n context = {\n 'form': form,\n 'product': product,\n }\n\n return render(request, template, context)",
"def edit_products_field(self):\n text = '<table style=\"padding:5px;\">'\n subscription_products = SubscriptionProduct.objects.filter(subscription=self)\n for sp in subscription_products:\n text += (\n '<tr style=\"padding:5px;\"><td style=\"padding:5px;\">{}</td><td style=\"padding:5px;\">{} un.</td>'\n '<td style=\"padding:5px;\">{}</td></tr>'.format(\n sp.product.name, sp.copies, sp.address\n )\n )\n text += \"</table>\"\n text += (\n \"<a href='/admin/core/subscription/{}/' target='_blank'>Edit</a>\".format(\n self.id\n )\n )\n return mark_safe(text)",
"def editar_prod(self):\n\t codigo=self.ui.codigo_prod.text()\n\t\tnombre=self.ui.nombre_prod.text()\n\t\tdescripcion=self.ui.descripcion_prod.text()\n\t\tmarca=self.ui.marca_prod.text()\n\t\tcolor=self.ui.color_prod.text()\n\t\tresultado=controller.editar_producto(codigo,nombre,descripcion,marca,color)\n\t\tif resultado:\n\t\t self.reject()",
"def edit_item(request, product_id):\n\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, you are not permitted to do that.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n if request.method == 'POST':\n form = ProductForm(request.POST, request.FILES, instance=product)\n if form.is_valid():\n form.save()\n messages.success(request, 'You have successfully updated store item!')\n return redirect(reverse('home'))\n else:\n messages.error(request, 'Failed to update item. Please check the form.')\n else:\n form = ProductForm(instance=product)\n messages.info(request, f'You are editing {product.name}')\n\n template = 'products/edit_item.html'\n context = {\n 'form': form,\n 'product': product,\n }\n\n return render(request, template, context)",
"def set_product(self, product):\n self.single_selection_from_static_kendo_dropdown(self.product_kendo_dropdown_locator, product)",
"def permission(self):\n return \"core.manage_products\"",
"def display(auth_context):\n\n products = product_catalog.list_products()\n # Get promoted products recommended by the AutoML model.\n promos = product_catalog.get_promos()\n return render_template('product_catalog.html',\n products=products,\n promos=promos,\n auth_context=auth_context,\n bucket=product_catalog.BUCKET)",
"def edit(self):\n\n pass",
"def search_products_as_admin_single_page(self, **kwargs):\n return slurp(\n 'search_products_as_admin',\n self.search_products_as_admin,\n 'ProductViewDetails',\n **kwargs\n )",
"def getEditForm( self ):\n return \"listc_edit\"",
"def admin():\n return render_template('bulkform.html')",
"def right_click(self, event):\n\n super().right_click(event)\n self.popup_menu.add_command(label=\"Edit..\", command=self.edit)\n\n self.popup_menu.tk_popup(event.x_root, event.y_root, 0)",
"def right_click(self, event):\n\n super().right_click(event)\n self.popup_menu.add_command(label=\"Edit..\", command=self.edit)\n\n self.popup_menu.tk_popup(event.x_root, event.y_root, 0)",
"def display_product_from_id(self, product_id):\n self.cur.execute(\"SELECT name, brand, nova, stores, id FROM Product WHERE id = %s\", (product_id, ))\n response = self.cur.fetchall()\n response = response[0]\n print (\"{} de la marque {} (indice nova : {}), disponible dans les magasins {}.\\n\"\n \"Lien vers une description complete https://fr.openfoodfacts.org/produit/{}\\n\".\n format(response[0], response[1], response[2], response[3], response[4]))",
"def edit_form():\n return template (\"edit\")",
"def admin(request):\r\n assert isinstance(request, HttpRequest)\r\n return render(\r\n request,\r\n 'app/admin/admin.html',\r\n context_instance=RequestContext(request,\r\n {\r\n 'title': 'Colmeia | Administrador',\r\n 'year': datetime.now().year,\r\n })\r\n )"
] | [
"0.6703032",
"0.6085533",
"0.6074324",
"0.60685647",
"0.60130733",
"0.59926045",
"0.59859747",
"0.5963964",
"0.594613",
"0.5924462",
"0.5916004",
"0.5900565",
"0.5900224",
"0.588214",
"0.58810765",
"0.5868658",
"0.58564746",
"0.58458424",
"0.58431137",
"0.5818649",
"0.56913155",
"0.5669103",
"0.56549364",
"0.5614906",
"0.5596135",
"0.5582863",
"0.5582863",
"0.55667275",
"0.55499053",
"0.5512245"
] | 0.67454296 | 0 |
Display the latest transactions, in pages. | def __transaction_pages(self):
log.debug("Displaying __transaction_pages")
# Page number
page = 0
# Create and send a placeholder message to be populated
message = self.bot.send_message(self.chat.id, self.loc.get("loading_transactions"))
# Loop used to move between pages
while True:
# Retrieve the 10 transactions in that page
transactions = self.session.query(db.Transaction) \
.order_by(db.Transaction.transaction_id.desc()) \
.limit(10) \
.offset(10 * page) \
.all()
            # Create a list to be converted into an inline keyboard markup
inline_keyboard_list = [[]]
# Don't add a previous page button if this is the first page
if page != 0:
# Add a previous page button
inline_keyboard_list[0].append(
telegram.InlineKeyboardButton(self.loc.get("menu_previous"), callback_data="cmd_previous")
)
# Don't add a next page button if this is the last page
if len(transactions) == 10:
# Add a next page button
inline_keyboard_list[0].append(
telegram.InlineKeyboardButton(self.loc.get("menu_next"), callback_data="cmd_next")
)
# Add a Done button
inline_keyboard_list.append(
[telegram.InlineKeyboardButton(self.loc.get("menu_done"), callback_data="cmd_done")])
# Create the inline keyboard markup
inline_keyboard = telegram.InlineKeyboardMarkup(inline_keyboard_list)
# Create the message text
transactions_string = "\n".join([transaction.text(w=self) for transaction in transactions])
text = self.loc.get("transactions_page", page=page + 1, transactions=transactions_string)
# Update the previously sent message
self.bot.edit_message_text(chat_id=self.chat.id, message_id=message.message_id, text=text,
reply_markup=inline_keyboard)
# Wait for user input
selection = self.__wait_for_inlinekeyboard_callback()
# If Previous was selected...
if selection.data == "cmd_previous" and page != 0:
# Go back one page
page -= 1
# If Next was selected...
elif selection.data == "cmd_next" and len(transactions) == 10:
# Go to the next page
page += 1
# If Done was selected...
elif selection.data == "cmd_done":
# Break the loop
break | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def transaction_list(request, model_class=Transaction, template_name='budget/transactions/list.html'):\n transaction_list = model_class.active.order_by('-date', '-created')\n try:\n paginator = Paginator(transaction_list, getattr(settings, 'BUDGET_LIST_PER_PAGE', 50))\n page = paginator.page(request.GET.get('page', 1))\n transactions = page.object_list\n except InvalidPage:\n raise Http404('Invalid page requested.')\n return render_to_response(template_name, {\n 'transactions': transactions,\n 'paginator': paginator,\n 'page': page,\n }, context_instance=RequestContext(request))",
"def history():\n\n transactions = db.execute(\"SELECT * FROM transactions WHERE user_id = ? ORDER BY date DESC, time DESC\", session[\"user_id\"])\n\n return render_template(\"history.html\", transactions=transactions)",
"def history():\n\n # get all transactions for current user\n transactions = db.execute(\"SELECT * FROM transactions WHERE user_id = :user_id\", user_id=session[\"user_id\"])\n\n # render history.html with all user transactions\n return render_template(\"history.html\", transactions=transactions, usd=usd)",
"def history():\n\n transactions = db.execute(\"SELECT stock, amount, price, date, time, total_amount FROM transactions WHERE id=:id\", id=session['user_id'])\n\n\n return render_template(\"index.html\", transactions=transactions)",
"def history():\n\n #Query transactions by user id\n trans = Transactions.query.filter_by(owner=session['user_id']).all()\n\n #Convert Price to US Dollars and format transaction time\n for t in trans:\n t.price = usd(t.price)\n t.transacted = t.transacted.strftime('%Y-%m-%d %H:%M:%S')\n\n #Return history.html\n return render_template('history.html', trans=trans)",
"def see_all_transfers(request):\n transfers = Transaction.objects.all().order_by('-executed_time')\n return render(request, 'app/allTransfers.html', {'transfers': transfers})",
"def history():\n transactions_list = db.execute(\"SELECT stock, units, price, time, type FROM transactions WHERE id = :current_id\",\n current_id=session[\"user_id\"])\n\n return render_template(\"history.html\", transactions=transactions_list)",
"def main_page(self):\n return render_template(\"index.html\", traders_count=len(self.market.traders),\n current_iteration=self.market.current_iteration, traders=self.market.traders,\n buy_orders=self.market.buy_orders, sell_orders=self.market.sell_orders,\n current_stock_price=self.market.stock.price_history[-1])",
"def history():\n\n userId = session[\"user_id\"]\n\n shares = db.execute(f\"SELECT symbol, shares, price, trans_time FROM transactions WHERE user_id={userId} ORDER BY trans_id DESC\")\n\n return render_template(\"history.html\", shares=shares)",
"def history():\n\n rows = db.execute(\"SELECT * FROM 'transaction' WHERE u_id = :user_id\", user_id = session[\"user_id\"])\n return render_template(\"history.html\", rows = rows)",
"def main_page():\n pages=get_accounts()\n return render_template('disp.html',pages=pages)",
"def history():\n rows = db.execute(text(\n \"SELECT symbol, shares, price, time FROM transactions \"\n \"WHERE user_id=:id\"),\n id=session[\"user_id\"])\n transactions = []\n for row in rows:\n transaction = dict(row)\n transaction[\"price\"] = usd(transaction[\"price\"])\n transactions.append(transaction)\n return render_template(\"history.html\", transactions=transactions)",
"def index(request):\n\n queryset_list = Todo.objects.all() #.order_by(\"-timestamp\")\n page = request.GET.get('page', 1)\n\n paginator = Paginator(queryset_list, 2)\n try:\n queryset = paginator.page(page)\n except PageNotAnInteger:\n queryset = paginator.page(1)\n except EmptyPage:\n queryset = paginator.page(paginator.num_pages)\n\n context = {\n \"taskli\": queryset, \n }\n return render(request, \"lists/task_list.html\", context)",
"def page(request, pagenum):\n context = Paginator().filter(Book.objects.all(), pagenum)\n return render(request, 'books/bookListPage.html', context)",
"async def last_page(self):\n await self.show_page(self.maximum_pages)",
"def history():\n userid = session[\"user_id\"]\n transactions = db.execute(\"SELECT * FROM purchase WHERE userid = :userid\", userid = userid)\n for transaction in transactions:\n transaction[\"price\"] = usd(transaction[\"tot\"]/transaction[\"shares\"])\n transaction[\"name\"] = lookup(transaction[\"symbol\"])['name']\n return render_template(\"history.html\", transactions=transactions)",
"def history():\n transactions = db.execute(\"SELECT Symbol, Shares, Transacted FROM cash WHERE id=:id\", id=session[\"user_id\"])\n return render_template(\"history.html\", transactions=transactions)",
"def display_pages():\n pages = utils.get_pages(g.graph)\n return render_template('display.html', app_id=FB_APP_ID,app_name=FB_APP_NAME, user=g.user, pages = pages)",
"def get(self):\n accounts = self.get_account_data()\n transactions = self.get_transaction_data()\n return render_template(\n \"index.html\", page_name=\"Main\", accounts=accounts, transactions=transactions\n )",
"def get(self):\n args = request.args\n page = int(args.get('page', 1))\n filters = []\n if \"filter_trade_market\" in args:\n filter_trade_market = request.args.getlist('filter_trade_market')\n filters.append(CurrencyPurchaseTransactions.stock_market_id.in_(filter_trade_market))\n if 'start_date' in request.args:\n start_date = datetime.strptime(args['start_date'], '%Y-%m-%d')\n filters.append(CurrencyPurchaseTransactions.timestamp >= start_date)\n if 'end_date' in request.args:\n end_date = datetime.strptime(args['end_date'], '%Y-%m-%d')\n end_date += timedelta(days=1)\n else:\n end_date = start_date + timedelta(days=1)\n filters.append(CurrencyPurchaseTransactions.timestamp < end_date)\n\n query_current = CurrencyPurchaseTransactions.query.filter(and_(*filters)).paginate(page=page,\n per_page=10,\n error_out=True)\n\n transactions = []\n for transaction in query_current.items:\n data = transaction.to_json()\n data.update(transaction.get_purchase_status())\n transactions.append(data)\n\n transactions.append({'number_of_pages': query_current.pages,\n \"current_page\": query_current.page,\n \"has_next_page\": query_current.has_next,\n \"has_prev_page\": query_current.has_prev})\n\n return transactions, 200",
"def show_page_list():\r\n\tpage_list = Page.objects.filter(in_nav=1).order_by('order')\r\n\treturn {'page_list': page_list}",
"def history():\n transactions = db.execute(\"SELECT Symbol, Shares, Price, Date FROM history WHERE UserID = :userid\", userid=session.get(\"user_id\"))\n return render_template(\"history.html\", transactionList = transactions, currentUser=session.get(\"user_id\"))",
"def history():\n get_trans_codes = db.execute(\"SELECT transaction_code FROM History WHERE id = :id\", id=session['user_id'])\n get_symbols = db.execute(\"SELECT symbol FROM History WHERE id = :id\", id=session['user_id'])\n get_companies = db.execute(\"SELECT company FROM History WHERE id = :id\", id=session['user_id'])\n get_trans_types = db.execute(\"SELECT transaction_type FROM History WHERE id = :id\", id=session['user_id'])\n get_shares = db.execute(\"SELECT shares FROM History WHERE id = :id\", id=session['user_id'])\n get_prices = db.execute(\"SELECT price FROM History WHERE id = :id\", id=session['user_id'])\n get_timestamps = db.execute(\"SELECT timestamp FROM History WHERE id = :id\", id=session['user_id'])\n\n trans_codes = [code['transaction_code'] for code in get_trans_codes]\n symbols = [symbol['symbol'] for symbol in get_symbols]\n companies = [company['company'] for company in get_companies]\n trans_types = [types['transaction_type'] for types in get_trans_types]\n shares = [share['shares'] for share in get_shares]\n prices = [price['price'] for price in get_prices]\n timestamps = [timestamp['timestamp'] for timestamp in get_timestamps]\n\n return render_template(\"history.html\", values=zip(trans_codes, symbols, companies, trans_types, shares, prices, timestamps))",
"def history():\n\n # Access user's id\n user_id = session[\"user_id\"]\n\n # Obtain history information for logged in user\n TRANSACTIONS = db.execute(\"SELECT * FROM history WHERE user_id = ? ORDER BY transacted DESC\", user_id)\n\n return render_template(\"history.html\", transactions=TRANSACTIONS)",
"def history():\n userID = session[\"user_id\"]\n transactions = db.execute(\"SELECT * FROM transactions WHERE id=:userID\", userID=userID)\n\n for row in transactions:\n stock = lookup(row[\"symbol\"])\n row[\"name\"] = stock[\"name\"]\n row[\"total\"] = usd(row[\"num_shares\"] * row[\"price_ps\"])\n\n return render_template(\"history.html\", transactions=transactions)",
"def get(self, request):\n pages = self.get_queryset().all()\n return render(request, 'list.html', {\n 'pages': pages\n })",
"def get(self, request):\n pages = self.get_queryset().all()\n return render(request, 'list.html', {\n 'pages': pages\n })",
"def paginated(self) -> global___Snippet.Paginated:",
"def history():\n # query database for history\n transactions = db.execute(\"SELECT symbol, volume, share_price, dtstamp FROM `transaction` WHERE id = :id\", id = session[\"user_id\"])\n\n # initialise dict\n dic = {}\n\n # interate through history array\n\n # pass data to template\n return render_template(\"history.html\", transactions = transactions)",
"def index(request):\n receitas = Receita.objects.order_by('-data_postagem').filter(publicado=True)\n paginator = Paginator(receitas, 3)\n page = request.GET.get('page')\n receitas_por_pagina = paginator.get_page(page)\n dados = {\n 'receitas': receitas_por_pagina\n }\n return render(request, 'index.html', dados)"
] | [
"0.688372",
"0.6692908",
"0.65745246",
"0.65152967",
"0.6440288",
"0.6437697",
"0.6429272",
"0.6316394",
"0.628526",
"0.6220077",
"0.6215298",
"0.61452043",
"0.6122129",
"0.6120994",
"0.6119554",
"0.60814244",
"0.6068557",
"0.60655093",
"0.60478973",
"0.60313505",
"0.6014012",
"0.60004485",
"0.59819996",
"0.5979269",
"0.5963718",
"0.5938277",
"0.5938277",
"0.59066963",
"0.5898859",
"0.58909535"
] | 0.76497626 | 0 |
Generate a .csv file containing the list of all transactions. | def __transactions_file(self):
log.debug("Generating __transaction_file")
# Retrieve all the transactions
transactions = self.session.query(db.Transaction).order_by(db.Transaction.transaction_id.asc()).all()
        # Create the file if it doesn't exist
try:
with open(f"transactions_{self.chat.id}.csv", "x"):
pass
except IOError:
pass
        # Write to the previously created file
with open(f"transactions_{self.chat.id}.csv", "w") as file:
            # Write a header line
file.write(f"UserID;"
f"TransactionValue;"
f"TransactionNotes;"
f"Provider;"
f"ChargeID;"
f"SpecifiedName;"
f"SpecifiedPhone;"
f"SpecifiedEmail;"
f"Refunded?\n")
            # For each transaction, write a new line in the file
for transaction in transactions:
file.write(f"{transaction.user_id if transaction.user_id is not None else ''};"
f"{transaction.value if transaction.value is not None else ''};"
f"{transaction.notes if transaction.notes is not None else ''};"
f"{transaction.provider if transaction.provider is not None else ''};"
f"{transaction.provider_charge_id if transaction.provider_charge_id is not None else ''};"
f"{transaction.payment_name if transaction.payment_name is not None else ''};"
f"{transaction.payment_phone if transaction.payment_phone is not None else ''};"
f"{transaction.payment_email if transaction.payment_email is not None else ''};"
f"{transaction.refunded if transaction.refunded is not None else ''}\n")
# Describe the file to the user
self.bot.send_message(self.chat.id, self.loc.get("csv_caption"))
# Reopen the file for reading
with open(f"transactions_{self.chat.id}.csv") as file:
# Send the file via a manual request to Telegram
requests.post(f"https://api.telegram.org/bot{self.cfg.telegram['token']}/sendDocument",
files={"document": file},
params={"chat_id": self.chat.id,
"parse_mode": "HTML"})
# Delete the created file
os.remove(f"transactions_{self.chat.id}.csv") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_customers(self):\n output = ''\n for i in range(len(self.customers)):\n output += f'Customer no. {self.customers[i].id} is in {self.customers[i].state[0]} section\\n'\n #print(output)\n with open('oneday.csv','a') as outfile:\n for i in range(len(self.customers)):\n outfile.write(f'{self.get_time()};{self.customers[i].id};{self.customers[i].state[0]}\\n')",
"def gen_csv(self, show_headers=True, show_tags=True):\n class TextOut:\n \"\"\"Simple string output source to capture CSV\"\"\"\n def __init__(self):\n self.data = ''\n def write(self, s):\n self.data += s\n def get(self):\n data = self.data\n self.data = ''\n return data\n output = TextOut()\n writer = csv.writer(output)\n for raw in self.gen_raw(show_headers, show_tags):\n writer.writerow(raw)\n yield output.get()",
"def generate_csv(table, header):\n with open(\"%s.csv\" % header, \"w\") as csvfile:\n for i in range(len(table)):\n for j in range(len(table[i])):\n if j != len(table[i])-1:\n tmp = table[i][j] + \",\"\n else:\n tmp = table[i][j] + \"\\n\"\n csvfile.write(tmp)",
"def generate_csv(self, lista):\r\n\t\ts = ''\r\n\t\tsalida = self.get_rel_path() + \"/\" + \"tree_names.csv\"\r\n\t\tfor i in lista:\r\n\t\t\t#st = i[2].split('/')\r\n\t\t\t#newpath = os.path.join(i[1],st)\r\n\t\t\thash = str(i[0])\r\n\t\t\tname_path = str(i[1] + \"/\" + i[2])\r\n\t\t\t#s = s + str(i[0]) + \";\" + i[1] + \"/\" + i[2] + \"\\n\"\r\n\t\t\tself.copy_file(hash,name_path)\r\n\t\t\ts = s + str(hash + \";\" + name_path + \"\\n\")\r\n\r\n\t\tf = open(salida,\"w\")\r\n\t\tf.write(s)\r\n\t\treturn salida",
"def generate_csv_output(payslip_data):\n payslip_output = StringIO(newline=None)\n csvFileWriter = csv.writer(payslip_output, delimiter=',')\n\n data = [['Full Name', 'Payment Period', 'Gross Income',\n 'Income Tax', 'Net Income', 'Super']]\n\n for employee in payslip_data:\n data.append([\n employee['full_name'],\n employee['payment_period'],\n str(employee['gross_income']),\n str(employee['income_tax']),\n str(employee['net_income']),\n str(employee['super_amount'])\n ])\n\n csvFileWriter.writerows(data)\n\n return payslip_output",
"def make_csv(user_id, fobj):\n data = show_history(user_id)\n report = csv.writer(fobj)\n report.writerow([\n 'Status',\n 'Date',\n 'Amount',\n 'From Curr',\n 'To Curr',\n 'To Address',\n ])\n for row in data:\n report.writerow([\n row.exchange_status.capitalize(),\n row.created_at.strftime('%Y-%m-%d %H:%I:%M'),\n row.amount,\n row.from_curr,\n row.to_curr,\n row.address_out\n ])",
"def create_csv(request):\n date_dict = income_date_parser(request)\n\n income_history = get_incomes_funds_ids(user_id=date_dict['user_id'],\n date_start=date_dict['start_date'],\n date_end=date_dict['finish_date'],\n time_diff=date_dict['utc_difference'])\n del income_history[-1]\n\n output = io.StringIO()\n\n headers = []\n if income_history:\n for i in income_history[0]:\n if i != 'income_history_id':\n headers.append(i)\n\n writer = csv.DictWriter(output, dialect='excel', quoting=csv.QUOTE_ALL, fieldnames=headers)\n writer.writeheader()\n\n if income_history:\n for entry in income_history:\n del entry['income_history_id']\n writer.writerow(entry)\n\n response = file_streaming_response('text/csv', 'income_history.csv', output)\n return response",
"def create_csv(self):\n try:\n # Convert List of Lists to DataFrame and write it to a CSV\n pd.DataFrame(self.data, columns=self.header) \\\n .to_csv(os.path.join(self.file_path, self.file_name), index=False)\n self.successful_run = True\n except:\n # TODO create Exception Handling\n raise",
"def generate_csv(lists, output_file):\n if os.path.isfile(output_file):\n with open(output_file, 'a') as file:\n dataset = tablib.Dataset()\n for l in lists:\n dataset.append([l['Original ASIN'], l['Associated ASIN'], l['Title'], l['Price'], l['Currency Code'], l['Relationship']])\n file.write(dataset.csv)\n else:\n with open(output_file, 'w+') as fp:\n dataset = tablib.Dataset(headers=['Original ASIN', 'Associated ASIN', 'Title', 'Price', 'Currency Code', 'Relationship'])\n for l in lists:\n dataset.append([l['Original ASIN'], l['Associated ASIN'], l['Title'], l['Price'], l['Currency Code'], l['Relationship']])\n fp.writelines(dataset.csv)",
"def csv_output(self):\r\n fh = open(\"output.csv\",'w')\r\n for i in range(len(self.population.columns)):\r\n if i != len(self.population.columns)-1:\r\n fh.write(str(self.population.columns[i]))\r\n fh.write(\",\")\r\n else:\r\n fh.write(str(self.population.columns[i]))\r\n fh.write(\"\\n\")\r\n\r\n for i in range(len(self.population.data)):\r\n for j in range(len(self.population.data[i])):\r\n if j != len(self.population.data[i])-1:\r\n fh.write(str(self.population.data[i][j]))\r\n fh.write(\",\")\r\n else:\r\n fh.write(str(self.population.data[i][j]))\r\n fh.write(\"\\n\")\r\n fh.close()",
"def showTransactions(self):\n self.scanTransactions()\n txns = []\n\n # Summarize the stats\n for x in range(len(self._trans)):\n stats = self._trans[x]\n trans_time = 0\n remote_calls = 0\n for name, stat in stats:\n trans_time += stat.total_tt\n remote_calls += 1\n txns.append((x, trans_time, remote_calls))\n\n results = [\"TX#\\tTime\\tCalls\",\n \"=\" * 22]\n\n for item in txns:\n results.append(\"%3d\\t%4f\\t%5d\" % item)\n \n return \"\\n\".join(results)",
"def generate_dataset_csv(request):\n\n response = csv_export(request,Dataset)\n return response",
"def export(tako_list, filename):\n for tak in tako_list:\n tak = tak[0]\n l1 = [tak.ident, \"a\"]\n for gen in tak.genome.weightchr_a:\n l1.append(gen.ident)\n l1.append(gen.weight)\n l1.append(gen.mut_rate)\n l1.append(gen.dom)\n f = os.path.join(\"Data\", (filename[:-4] + \" gene data.csv\"))\n with open(f, 'a', newline=\"\") as csvfile:\n writ = csv.writer(csvfile)\n writ.writerow(l1)\n if len(tak.genome.weightchr_b) != 0:\n l2 = [tak.ident, \"b\"]\n for gen in tak.genome.weightchr_b:\n l2.append(gen.ident)\n l2.append(gen.weight)\n l2.append(gen.mut_rate)\n l2.append(gen.dom) \n writ.writerow(l2)",
"def write_csv(self, file):\n # Write header row\n file.write('Timestamp,MessageType,Queue,Price,Volume,OrderID\\n')\n # Write content\n for x in self.records:\n row = (str(x[0]) + ',' + x[1][\"MessageType\"] + ',' +\n x[1][\"Queue\"] + ',' + str(x[1][\"Price\"]) + ',' +\n str(x[1][\"Volume\"]) + ',' + str(x[1][\"OrderID\"]) + '\\n')\n file.write(row)",
"def create_csv_file(self):\r\n # Create a new csv-file\r\n with open(self.fname, 'w') as f:\r\n writer = csv.writer(f, dialect='excel')\r\n writer.writerow(['set_time',\r\n 'read_time_P_ac',\r\n 'read_time_P_bat',\r\n 'soc',\r\n 'set_value',\r\n 'P_ac',\r\n 'P_bat'])",
"def write_csv(self, outfile, collapse_orders=False, show_age=False):\r\n # Write header row\r\n outfile.write(self.get_csv_header(collapse_orders, show_age).encode())\r\n\r\n # Write content\r\n for x in self.records:\r\n x.write_csv(outfile, collapse_orders, show_age)",
"def export(request):\n\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n\n filename = 'export-inscripcions-tallers-%s.csv' % date.today().strftime(\"%y-%m-%d\")\n\n regtaller_list = TallerRegistration.objects.all()\n\n table = ExportTallerRegistrationTable(regtaller_list)\n table.order_by = request.GET.get(\"sort\",'last_name')\n\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n writer = csv.writer(response)\n # Write headers to CSV file\n headers = []\n for column in table.columns:\n headers.append(column.header.encode('utf8'))\n writer.writerow(headers)\n\n # Write data to CSV file\n for obj in table.rows:\n row = []\n for value in obj:\n if isinstance(value, basestring):\n row.append(value.encode('utf8'))\n else:\n row.append(value)\n writer.writerow(row)\n\n # Return CSV file to browser as download\n return response",
"def make_echo_csv (list_of_region_tuples):\n\n #initialize the Echo formatted output dataframe\n out = pd.DataFrame(columns= ['Source Plate Name', 'Source Plate Type', 'Source Well', 'Sample ID', 'Sample Name', \\\n 'Sample Group', 'Sample Comment', 'Destination Plate Name', 'Destination Well', 'Transfer Volume'])\n\n idx = 0 #have to use a counter because we go through multiple lists and can't return to idx=0 each time\n #there may be a list of region tuples with source wells, volumes, dest wells\n for region in list_of_region_tuples:\n #for each well location to be shot from the current region\n for well in region[2]:\n #add the dest well\n out.loc[idx, 'Destination Well'] = well\n #Add the source well and transfer volume for that region\n out.loc[idx, ['Source Well', 'Transfer Volume']] = [region[0], region[1]]\n idx += 1\n\n #Set the unchanging names for the dataframe\n out[['Source Plate Name', 'Source Plate Type', 'Destination Plate Name']] = ['Source[1]', '384PP_AQ_BP', 'Destination[1]']\n\n return out",
"def generate_csv(type, json_list, columns_list):\n with open(\"data/\" + type + \"_\" + time.strftime(\"%Y-%m-%d_%H:%M:%S\") +\n \".csv\", 'a+') as f:\n csv_file = csv.DictWriter(f, fieldnames=columns_list,\n extrasaction=\"ignore\")\n csv_file.writeheader()\n for item in json_list:\n csv_file.writerow(item)\n print(\"\\nCSV file saved as data/\" + type + \"_\" +\n time.strftime(\"%Y-%m-%d_%H:%M:%S\") + \".csv\")",
"def csv_export(self,\n states=None,\n fields=None,\n filenamebase='projects',\n delimiter=',',\n newline='\\r\\n',\n ):\n \n if fields is None:\n fields = self.fields()\n \n out = StringIO()\n out.write(delimiter.join(fields) + newline)\n\n for project in self.data():\n values = []\n for field in project:\n text = field['text']\n if type(text) is UnicodeType:\n text = text.encode('utf8')\n value = CSV_TEMPLATE % text\n values.append(value)\n out.write(delimiter.join(values) + newline)\n \n value = out.getvalue()\n out.close()\n\n timestamp = datetime.today().strftime(\"%Y%m%d%H%M\")\n filename = filenamebase + timestamp + '.csv'\n \n self.request.RESPONSE.setHeader('Content-Type', 'application/x-msexcel')\n self.request.RESPONSE.setHeader(\"Content-Disposition\", \n \"inline;filename=%s\"%filename)\n\n return value",
"def generate_csv(inf, outf):\n o = csv.writer(outf)\n o.writerow(COLUMNS)\n for row in reformat_data(inf):\n o.writerow([inf.name] + row)",
"def export_to_csv(self, request, queryset):\n fields = self.get_table_fields()\n field_names = [field.name for field in fields]\n field_verbose_names = [field.verbose_name.encode(\n 'utf-8'\n ) for field in fields]\n\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; \\\nfilename=%s.csv' % unicode(self.model._meta).replace('.', '_')\n\n writer = csv.writer(response)\n writer.writerow(field_verbose_names)\n for obj in queryset:\n writer.writerow([unicode(getattr(obj, field)).encode(\n \"utf-8\",\n \"replace\"\n ) for field in field_names])\n return response",
"def writeCSV():\n final_list = get_final_list()\n path_to_csv_File = 'system_metrics.csv'\n\n csv_file = open(path_to_csv_File, 'w+', newline='', encoding=\"utf8\")\n csv_file_writer = csv.writer(csv_file, delimiter=',')\n\n csv_file_writer.writerow(['Subscription', 'Resource', 'MetricType',\n 'Timestamp', 'Unit', 'Minimum', 'Maximum', 'Average'])\n\n for item in final_list:\n csv_file_writer.writerow([item['subscription'], item['resource'], item['metricType'], item['timestamp'],\n item['unit'], item['minimum'], item['maximum'], item['average']])\n\n print('Output written successfully!!')",
"def get_string_of_transactions(self):\n s = \"\"\n for transaction in self.transactions:\n s += transaction.to_string()\n return s",
"def createFileCSV(table, path=\"./prediction\"):\t\n\tif len(table) < 1:\n\t\traise NameError('Empty Table!')\n\telse:\n\t\tfile = open(path + '.csv', 'w+')\n\n\t\tfile.write(table[0].toStringHeaders() + \"\\n\")\n\n\t\tfor row in table:\n\t\t\tfile.write(row.toStringCSV() + '\\n')\n\t\tfile.close()",
"def payment_engine(transaction_filename: str):\n try:\n transaction_list = TransactionList(pd.read_csv(transaction_filename))\n account_manager = AccountManager()\n transaction_list.process(account_manager)\n accounts = account_manager.accounts_data()\n print(accounts.to_csv(index=False))\n except FileNotFoundError:\n pass",
"def to_csv_file(self, records):\n self._file_manager.make_dir_when_no_dir(self._directory)\n csv_file = os.path.join(self._directory, self._file_name + \".csv\")\n record_lines = [rec.to_csv_string() + \"\\n\" for rec in records]\n self._file_manager.write_lines(csv_file, record_lines)",
"def print(self):\n df = self.gen_test()\n # print(df)\n df.to_csv('some_dated_file.csv', index=False)\n return df",
"def _write_csv(self):\n\n # add the label to the header\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self.header.append('Date')\n else:\n self.header.append('sample id')\n\n key_list = []\n\n for i, cube in enumerate(self.cube_list):\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self._write_sample_with_date(cube, i, key_list)\n else:\n self._write_sample(cube, i, key_list)\n\n output_data_file_path = self._get_full_file_name()\n self._write_data_dict(output_data_file_path, key_list)\n\n return [output_data_file_path]",
"def build_report(rows):\n\n outfile = NamedTemporaryFile(suffix='.csv', delete=False)\n\n with open(outfile.name, 'wb') as csvfile:\n writer = csv.writer(csvfile, delimiter='\\t',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n writer.writerow(['Column #1', 'Column #2', 'Column #3'])\n for i in range(int(rows)):\n writer.writerow(['Row #%d' % i, 'from task', 'build_report'])\n\n outfile.close()\n return outfile.name"
] | [
"0.66963094",
"0.6593577",
"0.6530911",
"0.6499839",
"0.647537",
"0.63679814",
"0.63550764",
"0.63113767",
"0.6298641",
"0.6165292",
"0.615324",
"0.6120086",
"0.6116998",
"0.61141175",
"0.61109525",
"0.6062832",
"0.6051639",
"0.60443205",
"0.60440016",
"0.60371464",
"0.6027558",
"0.6023878",
"0.6014981",
"0.6001623",
"0.59853727",
"0.5968954",
"0.5966909",
"0.5958367",
"0.5945794",
"0.59444225"
] | 0.69846463 | 0 |
Add an administrator to the bot. | def __add_admin(self):
log.debug("Displaying __add_admin")
        # Let the admin select a user to promote to administrator
user = self.__user_select()
# Allow the cancellation of the operation
if isinstance(user, CancelSignal):
return
# Check if the user is already an administrator
admin = self.session.query(db.Admin).filter_by(user_id=user.user_id).one_or_none()
if admin is None:
# Create the keyboard to be sent
keyboard = telegram.ReplyKeyboardMarkup([[self.loc.get("emoji_yes"), self.loc.get("emoji_no")]],
one_time_keyboard=True)
# Ask for confirmation
self.bot.send_message(self.chat.id, self.loc.get("conversation_confirm_admin_promotion"),
reply_markup=keyboard)
# Wait for an answer
selection = self.__wait_for_specific_message([self.loc.get("emoji_yes"), self.loc.get("emoji_no")])
# Proceed only if the answer is yes
if selection == self.loc.get("emoji_no"):
return
# Create a new admin
admin = db.Admin(user=user,
edit_products=False,
receive_orders=False,
create_transactions=False,
is_owner=False,
display_on_help=False)
self.session.add(admin)
# Send the empty admin message and record the id
message = self.bot.send_message(self.chat.id, self.loc.get("admin_properties", name=str(admin.user)))
# Start accepting edits
while True:
# Create the inline keyboard with the admin status
inline_keyboard = telegram.InlineKeyboardMarkup([
[telegram.InlineKeyboardButton(
f"{self.loc.boolmoji(admin.edit_products)} {self.loc.get('prop_edit_products')}",
callback_data="toggle_edit_products"
)],
[telegram.InlineKeyboardButton(
f"{self.loc.boolmoji(admin.receive_orders)} {self.loc.get('prop_receive_orders')}",
callback_data="toggle_receive_orders"
)],
[telegram.InlineKeyboardButton(
f"{self.loc.boolmoji(admin.create_transactions)} {self.loc.get('prop_create_transactions')}",
callback_data="toggle_create_transactions"
)],
[telegram.InlineKeyboardButton(
f"{self.loc.boolmoji(admin.display_on_help)} {self.loc.get('prop_display_on_help')}",
callback_data="toggle_display_on_help"
)],
[telegram.InlineKeyboardButton(
self.loc.get('menu_done'),
callback_data="cmd_done"
)]
])
# Update the inline keyboard
self.bot.edit_message_reply_markup(message_id=message.message_id,
chat_id=self.chat.id,
reply_markup=inline_keyboard)
            # Wait for a user answer
callback = self.__wait_for_inlinekeyboard_callback()
# Toggle the correct property
if callback.data == "toggle_edit_products":
admin.edit_products = not admin.edit_products
elif callback.data == "toggle_receive_orders":
admin.receive_orders = not admin.receive_orders
elif callback.data == "toggle_create_transactions":
admin.create_transactions = not admin.create_transactions
elif callback.data == "toggle_display_on_help":
admin.display_on_help = not admin.display_on_help
elif callback.data == "cmd_done":
break
self.session.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def addadmin(self, ctx, user: discord.Member):\n self.settings.addAdmin(user.id)\n await ctx.send(\"done\")",
"def addAdmin(self, softwareProfileName, adminUsername):\n return self._sp_db_api.addAdmin(softwareProfileName, adminUsername)",
"def cmd_addadmin_private(self, argument):\n if self.is_admin:\n users = argument.split()\n for user in users:\n self.bot.admins.add(user)\n self.send(self.nick, _(\"User %s added to admins\"), user)\n self.logger.info(\"User %s added %s to admins\" % (self.nick, user))\n else:\n self.logger.warning(\"User %s tried to use '%s' without being admin\" % (self.nick, \"addadmin\"))",
"def admins_add(request):\n username = request.params['add']\n try:\n accounts.make_admin(username)\n except accounts.NoSuchUserError:\n request.session.flash(\n _(\"User {username} doesn't exist.\".format(username=username)),\n \"error\")\n return admins_index(request)",
"def add_admin(user):\n _add_owner(\n _lookup_user(user).biv_id,\n _add_model(pam.Admin())\n )",
"def add_administrator(self, project_id, name, email):\n self._run(\n url_path=\"contributors/add\",\n id=project_id,\n name=name,\n email=email,\n admin=True\n )\n return True",
"def add_admin():\n email = Config.SITE_ADMIN\n password = input('Enter Admin Password >>> ')\n name = input('Enter Display Name >>> ')\n\n user = User(email, password, name)\n user.confirmed = True\n db.session.add(user)\n db.session.commit()\n print(\"%s has been added to the system as Admin\" % user.username)",
"async def _ad_add(self, ctx, member: discord.Member):\n new_admin = sql.TalosAdmin((ctx.guild.id, member.id))\n if new_admin not in self.database.get_admins(ctx.guild.id):\n self.database.save_item(new_admin)\n await ctx.send(f\"Added admin {member.name}!\")\n else:\n await ctx.send(\"That user is already an admin!\")",
"def add_bot(self, bot):\n self.add_user(bot)",
"async def admin_add(self, ctx: MyContext, wormhole: str, user: discord.User):\n if not self.check_wh_exists(wormhole):\n await ctx.send(\n await self.bot._(\n ctx.guild.id, \"wormhole.error.not-exists\", name=wormhole\n )\n )\n return\n if not self.check_is_admin(wormhole, ctx.author.id):\n await ctx.send(await self.bot._(ctx.guild.id, \"wormhole.error.not-admin\"))\n return\n query = \"SELECT 1 FROM wormhole_admin WHERE name = ? AND admin = ?\"\n isAlready = len(self.bot.db_query(query, (wormhole, user.id))) > 0\n if not isAlready:\n query = \"INSERT INTO wormhole_admin (name, admin) VALUES (?, ?)\"\n self.bot.db_query(query, (wormhole, user.id))\n await ctx.send(\n await self.bot._(ctx.guild.id, \"wormhole.success.admin-added\")\n )\n else:\n await ctx.send(\n await self.bot._(\n ctx.guild.id, \"wormhole.error.already-admin\", user=user.name\n )\n )",
"async def admin(ctx):\n info = await(bot.application_info())\n mention = info.owner.mention\n message = \"My administrator is the glorious {}. Fear them, for they are mighty.\".format(mention)\n await(ctx.send(message))",
"def add_admin():\n admin_role = Role.query.filter_by(permissions=0xFF).first()\n admin = User.query.filter_by(email=current_app.config['PILI_ADMIN']).first()\n if not admin:\n admin_user = User(\n email=current_app.config['PILI_ADMIN'],\n username=current_app.config['PILI_ADMIN_NAME'],\n password=generate_password(10),\n role=admin_role,\n confirmed=True,\n )\n db.session.add(admin_user)\n db.session.commit()",
"def add_admin(self, username, password):\n password_hash = generate_password_hash(password) # Generates a SHA256 hash.\n try:\n self.cur.execute(\"INSERT INTO admins VALUES(\\\"{}\\\", \\\"{}\\\")\".format(username, password_hash))\n self.db.commit()\n except:\n self.db.rollback()",
"async def addme(self, ctx):\n invite_url = discord.utils.oauth_url(self.bot.user.id, permissions=discord.Permissions(8))\n embed = self.bot.embeds.embed_builder(title='Add this bot to your own Discord server',\n description=invite_url,\n has_footer=False)\n await ctx.send(embed=embed)",
"def do_admins(bot, msg, **kwargs):\n channel = kwargs.get('event').get('channel')\n bot.post_msg(\n text='My admins are: {admins}'.format(\n admins=', '.join([bot.format_user_mention(x) for x in bot.masters.values()])\n ),\n channel_name_or_id=channel\n )\n \n return True",
"def make_admin(self):\n user_datastore = SQLAlchemyUserDatastore(db, User, Role)\n user_datastore.add_role_to_user(self, 'admin')\n db.session.commit()",
"def promote_admin(self, user: TelegramController.User = None, id: str = None):\n\n if id == None:\n id = user.id\n header = connect(self.__path)\n curs = header.cursor()\n encrypted_id = md5((str(id) + \"admin\").encode()).hexdigest()\n curs.execute(\"INSERT INTO admins(id) VALUES(?)\", (encrypted_id,))\n header.commit()\n self.__update_admin_cache()",
"def administrator():\n\n administrator = Administrator.objects.create(name='Michał', surname='Paluch',\n login='Udfsr43', password='Password_3',\n password_repeat='Password_3')\n return administrator",
"def add_administrator_interface(self, udp_port: int, login :str, password: str):\n self.administrator_cfg = AdministratorCfg(udp_port=udp_port,\n login=login,\n password=password)\n return self",
"def set_admin(self, admins):\n self.set_group(self._gp_admin_name, admins)",
"def add_admin_command(username):\n try:\n if add_user([username,], True):\n print(\"Successfully added %s as an administrator.\" % (username,))\n else:\n print(\n \"Failed to add %s as an administrator. Have you run the \"\n \"initdb command? Are you sure that string fits the RIT \"\n \"username format?\" % (username,)\n )\n except IntegrityError:\n print(\"%s is already an administrator. No action taken.\" % (username,))\n sys.exit(2)",
"def create_admin():\n db.session.add(User(email='[email protected]', password='admin', admin=True))\n db.session.commit()",
"def create_admin():\n db.session.add(User(email='[email protected]', password='admin', admin=True))\n db.session.commit()",
"def __addNewAdminQuery(self,admin_id,username,password,name,comment,creator_id):\n return ibs_db.createInsertQuery(\"admins\",{\"admin_id\":admin_id,\n \"username\":dbText(username),\n \"password\":dbText(password),\n \"name\":dbText(name.strip()),\n \"comment\":dbText(comment.strip()),\n \"creator_id\":dbText(creator_id),\n \"deposit\":0,\n \"due\":0\n })",
"def create_admin():\n db.session.add(User(\n email=str(hashlib.sha256(\"[email protected]\".encode()).hexdigest()),\n password=\"admin\",\n admin=True,\n confirmed=True,\n confirmed_on=datetime.datetime.now())\n )\n db.session.commit()",
"def add_admin(user_id=None):\r\n try:\r\n if user_id:\r\n user = db.session.query(model.user.User)\\\r\n .get(user_id)\r\n require.user.update(user)\r\n if user:\r\n user.admin = True\r\n db.session.commit()\r\n return redirect(url_for(\".users\"))\r\n else:\r\n msg = \"User not found\"\r\n return format_error(msg, 404)\r\n except Exception as e: # pragma: no cover\r\n current_app.logger.error(e)\r\n return abort(500)",
"def create_admin():\n db.session.add(User(\n email=\"[email protected]\",\n password=\"admin\",\n admin=True,\n confirmed=True)\n )\n db.session.commit()",
"async def insert_administrator(self, role_id: int) -> None:\r\n\r\n query = \"\"\"\r\n INSERT INTO administrators (role)\r\n VALUES (%s)\r\n \"\"\"\r\n async with self.conn.cursor() as cur:\r\n await cur.execute(query, (role_id,))",
"def admin_edit_admins():\n return user_management_handler(\"show_admin_edit_admins\", \"new_admins\", True)",
"def addAdmin(username, sshId, user, identity):\n if identity:\n env.key_filename = identity\n if user:\n env.user = user\n sudo('adduser --disabled-password --gecos \",,,\" %s' % username)\n sudo('usermod -p \"\" %s' % username)\n sudo('chage -d 0 %s' % username)\n sudo('gpasswd --add %s admin' % username)\n authorizeSshKey(username, sshId)"
] | [
"0.7431501",
"0.69612104",
"0.69173306",
"0.6861339",
"0.6813076",
"0.6811589",
"0.6779468",
"0.6773782",
"0.67275375",
"0.6693339",
"0.6648171",
"0.66383785",
"0.66203934",
"0.64802665",
"0.63945043",
"0.61657405",
"0.6165614",
"0.6116291",
"0.60149425",
"0.6012939",
"0.600511",
"0.59945565",
"0.59945565",
"0.598601",
"0.59672606",
"0.59670705",
"0.5966631",
"0.5964557",
"0.59403145",
"0.5897491"
] | 0.74137247 | 1 |
Given a frame from the camera and a destination, figure out which direction to take next | def get_next_direction(current_frame, scanner, code):
# ### thresholding. susceptible to glare, solve with masking tape?
thresh = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)
# success, thresh = cv2.threshold(thresh, BW_THRESHOLD, 255, cv2.THRESH_BINARY)
# if not success:
# print "Could not threshold frame, skipping."
# # Okay to return 'STRAIGHT' here because the thresholding error will cause the
# # speed calculator to bail out and we'll skip the frame.
# return 'STRAIGHT'
pil_image = Image.fromarray(thresh, 'L')
width, height = pil_image.size
raw = pil_image.tostring()
# wrap image data
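    # 'Y800' tells zbar to read the buffer as single-channel 8-bit greyscale, matching the 'L'-mode PIL image above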
image = zbar.Image(width, height, 'Y800', raw)
# scan the image for barcodes
scanResult = scanner.scan(image)
if scanResult:
for symbol in image:
# do something useful with results
print 'decoded', symbol.type, 'symbol', '"%s"' % symbol.data
report_data_to_webserver(symbol.data)
if symbol.data == code:
return 'STOP'
# if QR code found, and QR code text is the desired destination, return stop
return 'STRAIGHT' # Can be one of STRAIGHT, STOP. | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def camera_frame_directions(self) -> _BFRAME_TYPE:\n pass",
"def camera_frame_directions(self) -> _BFRAME_TYPE:\n\n return self._base_frame_directions",
"def get_direction(position, next_position):\n x, y = position\n next_x, next_y = next_position\n if x == next_x:\n if y < next_y:\n return constants.Action.Right\n else:\n return constants.Action.Left\n elif y == next_y:\n if x < next_x:\n return constants.Action.Down\n else:\n return constants.Action.Up\n raise constants.InvalidAction(\"We did not receive a valid position transition.\")",
"def step_towards(self, x, y, target_x, target_y):\n path = libtcod.path.new_using_map(self.fov_map)\n libtcod.path.compute(path, x, y, target_x, target_y)\n (t_x, t_y) = libtcod.path.walk(path, False)\n if t_x is None:\n return None, None\n else:\n return t_x - x, t_y - y",
"def getNextDest(self):\n\n if self.direction_forward:\n if len(self.destinations)-1 == self.current_loc: #if Autobuz reaches rightmost destination, it also takes a break and changes directions\n self.direction_forward = False #Autobuz changes direction\n self.updateOmLocation()\n return self.destinations[self.current_loc], (self.break_duration + self.trip_duration) #return destination reached and elapsed time\n \n else:\n self.current_loc += 1\n self.updateOmLocation()\n return self.destinations[self.current_loc], self.trip_duration\n \n else:\n if 0 == self.current_loc: #if Autobuz reaches leftmost destination, it also takes a break and changes directions\n self.direction_forward = True #Autobuz changes direction\n self.updateOmLocation()\n return self.destinations[self.current_loc], (self.break_duration + self.trip_duration)\n \n else:\n self.current_loc -= 1\n self.updateOmLocation()\n return self.destinations[self.current_loc], self.trip_duration",
"def _calc_relative_move_direction(self, char, direction):\n if char in (\"Left\", \"Right\"):\n di = -1 if self.video.hflip else 1\n else:\n di = -1 if self.video.vflip else 1\n return direction * di",
"def _discover_move(self, origin, direction):\n x, y = origin\n color = self[x][y]\n flips = []\n\n for x, y in OthelloBoard._increment_move(origin, direction, self.n):\n if self[x][y] == 0:\n if flips:\n # print(\"Found\", x,y)\n return (x, y)\n else:\n return None\n elif self[x][y] == color:\n return None\n elif self[x][y] == -color:\n # print(\"Flip\",x,y)\n flips.append((x, y))",
"def move(self):\n \"\"\" Responsible for transformations \"\"\"\n pos, com, success = self.perception \n if self.destination is None:\n return array([0,0])\n\n if not self.awake:\n return array([0,0])\n\n\n if self.phase == 4 and self.proper_formation is not None:\n no_go = []\n for i in range(0,len(self.proper_formation)):\n if i != self.order and self.proper_formation[i][0] == self.proper_formation[self.order][0]:\n no_go.append(self.transform(self.proper_formation[i][1] - self.position))\n pos = merge_array_lists(pos, no_go)\n\n if self.phase == 2:\n point = self.destination.copy() - self.position\n elif self.phase > 2:\n point = self.transform(self.destination.copy() - self.position)\n else:\n point = self.destination.copy()\n\n if not array_equal(point, array([0,0])):\n reachable, path = findpathtoclosest(array([0,0]), point, pos)\n \n if len(path) == 0:\n move = array([0,0]) \n else:\n move = path[0]\n if not reachable and not array_equal(move,array([0,0])):\n if self.phase == 2:\n self.closest_i_could_get = path[-1] + self.position\n elif self.phase > 2:\n self.closest_i_could_get = self.transform2(path[-1]) + self.position\n else:\n self.closest_i_could_get = path[-1]\n elif not reachable:\n if self.phase > 1:\n self.closest_i_could_get = self.position\n else:\n self.closest_i_could_get = array([0,0])\n else:\n self.closest_i_could_get = None\n\n if reachable and self.phase == 4 and array_equal(move,array([0,0])):\n move = self.randomStep()\n self.closest_i_could_get = None\n\n else:\n move = array([0,0])\n self.closest_i_could_get = None\n\n return move",
"def get_relative_direction(self, source, destination, orientation):\r\n direction = {}\r\n direction['North'] = {}\r\n direction['North']['North'] = 'Straight'\r\n direction['North']['East'] = 'Right'\r\n direction['North']['West'] = 'Left'\r\n direction['East'] = {}\r\n direction['East']['East'] = 'Straight'\r\n direction['East']['North'] = 'Left'\r\n direction['East']['South'] = 'Right'\r\n direction['South'] = {}\r\n direction['South']['South'] = 'Straight'\r\n direction['South']['East'] = 'Left'\r\n direction['South']['West'] = 'Right'\r\n direction['West'] = {}\r\n direction['West']['West'] = 'Straight'\r\n direction['West']['South'] = 'Left'\r\n direction['West']['North'] = 'Right'\r\n if type(orientation) != str or orientation not in ['North', 'East', 'South', 'West']:\r\n return None\r\n final_orientation = self.get_orientation_from_to(source, destination)\r\n if orientation == final_orientation:\r\n return 'Straight'\r\n else:\r\n print orientation\r\n print final_orientation\r\n return direction[orientation][final_orientation]",
"def get_starting_direction_vector(self):\n\n total_length = len(self.pixel_list)\n\n if total_length < 2:\n return None\n elif total_length < 15:\n delta_x = self.pixel_list[-1].x - self.pixel_list[0].x\n delta_y = self.pixel_list[-1].y - self.pixel_list[0].y\n return delta_y, delta_x\n else:\n delta_x = self.pixel_list[-15].x - self.pixel_list[0].x\n delta_y = self.pixel_list[-15].y - self.pixel_list[0].y\n return delta_y, delta_x",
"def faceTowards(self, target):\n current_tile = self.current_tile()\n if(target and current_tile):\n x_dist = target.coordinates()[0] - current_tile.coordinates()[0]\n if x_dist == 0: return\n self.direction_val = x_dist/abs(x_dist)\n #TEMP\n if self.direction_val == -1:\n self.direction_id = 'left'\n if self.direction_val == 1:\n self.direction_id = 'right'",
"def get_direction(event):\n return event['result']['parameters']['direction']",
"def get_ending_direction_vector(self):\n\n total_length = len(self.pixel_list)\n\n if total_length < 2:\n return None\n elif total_length < 15:\n delta_x = self.pixel_list[-1].x - self.pixel_list[0].x\n delta_y = self.pixel_list[-1].y - self.pixel_list[0].y\n return delta_y, delta_x\n else:\n delta_x = self.pixel_list[-15].x - self.pixel_list[-1].x\n delta_y = self.pixel_list[-15].y - self.pixel_list[-1].y\n return delta_y, delta_x",
"def direction(self):\n if self._is_hit:\n return Direction.NOT_MOVING\n return self._dir",
"def take_step(self, location, direction):\n if direction == RIGHT:\n step_location = (location[0]+1,location[1])\n elif direction == DOWN:\n step_location = (location[0], location[1]+1)\n else:\n return location\n\n if step_location[1] < self.rows and step_location[0] < self.columns:\n return step_location if self.is_space_open(step_location) else None",
"def decide_turn_direction(self, next_direction):\n\n # if facing backwards, try to initially go forwards.\n if GPIO.input(pins[\"DirectionMotorRight\"]):\n GPIO.output(pins[\"DirectionMotorRight\"], GPIO.LOW)\n GPIO.output(pins[\"DirectionMotorLeft\"], GPIO.HIGH)\n self.facing = not self.facing\n\n if self.facing == \"Right\":\n if next_direction == \"Down\":\n return \"Right\"\n elif next_direction == \"Up\":\n return \"Left\"\n else: # Left\n return \"Left\"\n\n elif self.facing == \"Left\":\n if next_direction == \"Down\":\n return \"Left\"\n elif next_direction == \"Up\":\n return \"Right\"\n else: # Right\n return \"Right\"\n\n elif self.facing == \"Up\":\n if next_direction == \"Right\":\n return \"Right\"\n elif next_direction == \"Left\":\n return \"Left\"\n else: # Down\n return \"Left\"\n\n else: # down\n if next_direction == \"Right\":\n return \"Left\"\n elif next_direction == \"Left\":\n return \"Right\"\n else: # Up\n return \"Right\"",
"def _route_to_goal(self, position, orientation):\n _, (_x,_y) = self._calc_torus_distance(position, self.goal)\n move = None\n\n if orientation == 'up':\n if self.goal[1] > position[1] and _y > 0:\n move = 'move'\n elif self.goal[1] < position[1] and _y < 1:\n move = 'move'\n elif self.goal[0] > position[0]:\n if _x > 0:\n move = 'left'\n else:\n move = 'right'\n else:\n if _x > 0:\n move = 'right'\n else:\n move = 'left'\n\n if orientation == 'down':\n if self.goal[1] < position[1] and _y > 0:\n move = 'move'\n elif self.goal[1] > position[1] and _y < 1:\n move = 'move'\n elif self.goal[0] > position[0]:\n if _x > 0:\n move = 'right'\n else:\n move = 'left'\n else:\n if _x > 0:\n move = 'left'\n else:\n move = 'right'\n\n if orientation == 'right':\n if self.goal[0] < position[0] and _x > 0:\n move = 'move'\n elif self.goal[0] > position[0] and _x < 1:\n move = 'move'\n elif self.goal[1] > position[1]:\n if _y > 0:\n move = 'left'\n else:\n move = 'right'\n else:\n if _y > 0:\n move = 'right'\n else:\n move = 'left'\n\n if orientation == 'left':\n if self.goal[0] > position[0] and _x > 0:\n move = 'move'\n elif self.goal[0] < position[0] and _x < 1:\n move = 'move'\n elif self.goal[1] > position[1]:\n if _y > 0:\n move = 'right'\n else:\n move = 'left'\n else:\n if _y > 0:\n move = 'left'\n else:\n move = 'right'\n\n return move",
"def filter_direction(frame, direction):\n return frame[frame['direction'] == direction].copy()",
"def get_next_destination(self) -> Location:\n # Find index of current location in route\n i = 0\n while i < len(self.route):\n this_loc = self.route[i]\n #\n if i > 0 and this_loc == self.route[-1]:\n return None\n if this_loc == self.current_location:\n return self.route[i + 1]\n i += 1",
"def attacking_direction_from_frame(frame: Frame) -> AttackingDirection:\n avg_x_home = avg(\n [\n coordinates.x\n for player, coordinates in frame.players_coordinates.items()\n if player.team.ground == Ground.HOME\n ]\n )\n avg_x_away = avg(\n [\n coordinates.x\n for player, coordinates in frame.players_coordinates.items()\n if player.team.ground == Ground.AWAY\n ]\n )\n\n if avg_x_home < avg_x_away:\n return AttackingDirection.HOME_AWAY\n else:\n return AttackingDirection.AWAY_HOME",
"def get_orientation_from_to(self, source, destination):\r\n if destination not in source.get_neighbors():\r\n return None\r\n return [x for x in source.neighbors.keys() if source.neighbors[x] == destination][0]",
"def find_direction(self):\n\t\tif self.direction == OUTPUT.MOTOR_UP:\n\t\t\tfor floor in xrange(self.currentFloor+1, config.NUM_FLOORS):\n\t\t\t if self.orderQueue.has_order_in_floor(floor):\n\t\t\t\t\treturn OUTPUT.MOTOR_UP\n\t\t\treturn OUTPUT.MOTOR_DOWN\n\t\telse:\n\t\t\tfor floor in xrange(self.currentFloor-1, -1, -1):\n\t\t\t\tif self.orderQueue.has_order_in_floor(floor):\n\t\t\t\t\treturn OUTPUT.MOTOR_DOWN\n\t\t\treturn OUTPUT.MOTOR_UP\n\t\treturn OUTPUT.MOTOR_UP",
"def get_dir_from_path(self):\n try:\n next_step = self.return_path[0]\n if next_step[0] > self.tile[0]:\n return 'd' # move up next\n if next_step[0] < self.tile[0]:\n return 'u' # move down next\n if next_step[1] > self.tile[1]:\n return 'r' # move right next\n if next_step[1] < self.tile[1]:\n return 'l' # move left next\n except IndexError as ie:\n print('Error while trying to get new path direction', ie)\n return None",
"def get_direction(self):\n return self.actual_coordinates[2]",
"def getDirection(self, location, target):\n # store the distance from the target to the location as dx and dy\n dx = target[0] - location[0]\n dy = target[1] - location[1]\n\n # if the x distance is less than 0, face West\n if dx < 0:\n dx = -1\n # or if the x distance is greater than 0, face East\n elif dx > 0:\n dx = 1\n\n # if the y distance is less than 0, face North\n if dy < 0:\n dy = -1\n # or if the y distance is greater than 0, face South\n elif dy > 0:\n dy = 1\n\n # return the direction as a set of coordinates\n return (dx, dy)",
"def convertdirection(self, frame):\n return _coordsys.coordsys_convertdirection(self, frame)",
"def get_action_for_move(\n agent_position: Tuple[int, int],\n agent_direction: Grid4TransitionsEnum,\n next_agent_position: Tuple[int, int],\n next_agent_direction: int,\n rail: GridTransitionMap) -> Optional[RailEnvActions]:\n possible_transitions = rail.get_transitions(*agent_position, agent_direction)\n num_transitions = np.count_nonzero(possible_transitions)\n # Start from the current orientation, and see which transitions are available;\n # organize them as [left, forward, right], relative to the current orientation\n # If only one transition is possible, the forward branch is aligned with it.\n if rail.is_dead_end(agent_position):\n valid_action = RailEnvActions.MOVE_FORWARD\n new_direction = (agent_direction + 2) % 4\n if possible_transitions[new_direction]:\n new_position = get_new_position(agent_position, new_direction)\n if new_position == next_agent_position and new_direction == next_agent_direction:\n return valid_action\n elif num_transitions == 1:\n valid_action = RailEnvActions.MOVE_FORWARD\n for new_direction in [(agent_direction + i) % 4 for i in range(-1, 2)]:\n if possible_transitions[new_direction]:\n new_position = get_new_position(agent_position, new_direction)\n if new_position == next_agent_position and new_direction == next_agent_direction:\n return valid_action\n else:\n for new_direction in [(agent_direction + i) % 4 for i in range(-1, 2)]:\n if possible_transitions[new_direction]:\n if new_direction == agent_direction:\n valid_action = RailEnvActions.MOVE_FORWARD\n new_position = get_new_position(agent_position, new_direction)\n if new_position == next_agent_position and new_direction == next_agent_direction:\n return valid_action\n elif new_direction == (agent_direction + 1) % 4:\n valid_action = RailEnvActions.MOVE_RIGHT\n new_position = get_new_position(agent_position, new_direction)\n if new_position == next_agent_position and new_direction == next_agent_direction:\n return valid_action\n elif new_direction == (agent_direction - 1) % 4:\n valid_action = RailEnvActions.MOVE_LEFT\n new_position = get_new_position(agent_position, new_direction)\n if new_position == next_agent_position and new_direction == next_agent_direction:\n return valid_action",
"def get_direction(curr_pos, next_pos):\n if curr_pos == next_pos:\n return 'CLEAN'\n\n v_dist = next_pos[0] - curr_pos[0]\n h_dist = next_pos[1] - curr_pos[1]\n\n if h_dist != 0:\n if h_dist < 0:\n return 'LEFT'\n else:\n return 'RIGHT'\n else:\n if v_dist < 0:\n return 'UP'\n else:\n return 'DOWN'",
"def tryDirection(d, currentRoom):\n attrib = d + '_to'\n\n # See if the room has the destination attribute\n if hasattr(currentRoom, attrib):\n # If so, return its value (the next room)\n return getattr(currentRoom, attrib)\n\n # Otherwise print an error and stay in the same room\n print(\"Where do you think your going?\")\n\n return currentRoom",
"def judge_goal(self):\n err_pos = math.sqrt((self.y_des - self.y)**2 +(self.x_des - self.x)**2)\n print(\"t= %s\" % rospy.get_time()+\"-----------\")\n print('destination position=['+str(self.x_des)+','+str(self.y_des)+\"]\")\n print('the current position=['+str(self.x)+','+str(self.y)+\"]\")\n print('the current yaw angle=['+str(self.yaw))\n print('distance to destination='+str(err_pos))\n\n if(err_pos < 0.8):\n print('reach goal!!!!!')\n self.goal_flag=1"
] | [
"0.694612",
"0.6346096",
"0.6220887",
"0.61938536",
"0.6123104",
"0.606547",
"0.60267913",
"0.59429306",
"0.5911182",
"0.5875572",
"0.5870038",
"0.5853388",
"0.5850153",
"0.5848268",
"0.5836786",
"0.58342284",
"0.5824965",
"0.58235735",
"0.58171266",
"0.5813398",
"0.5795338",
"0.5785315",
"0.57811564",
"0.5779963",
"0.57761836",
"0.57693076",
"0.57564294",
"0.57510984",
"0.5744205",
"0.57422006"
] | 0.6649756 | 1 |
Given a frame from the camera, figure out the line error | def get_line_error(im):
### Crop the picture
height = len(im)
width = len(im[0])
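    # trim height/CROP_RATIO rows from the top and bottom and width/CROP_RATIO columns from each side, keeping only the centre of the frame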
im = im[height/CROP_RATIO:-height/CROP_RATIO, width/CROP_RATIO:-width/CROP_RATIO]
### thresholding. susceptible to glare, solve with masking tape?
thresh = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
success, thresh = cv2.threshold(thresh, BW_THRESHOLD, 255, cv2.THRESH_BINARY)
if not success:
print "Could not threshold frame, skipping."
return None
### edge detection. constants here are magic
canny = cv2.Canny(thresh, 180, 220, apertureSize = 3)
### contour detection
contours, _ = cv2.findContours(canny,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
if len(contours) < 1:
return None
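    # sort by contour perimeter, longest first; the longest contours are assumed to trace the line being followed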
sorted_contours = sorted(contours, key=lambda x:cv2.arcLength(x,False), reverse=True)
## JUST FOR TESTING
# longest contours
if DEBUG_MODE:
cv2.drawContours(im,sorted_contours[0:2],-1,(0,255,0),3) # draw longest contour
cv2.imshow('lines',im)
k = cv2.waitKey(5)
if k == 27:
cv2.destroyAllWindows()
return None
### Find x coordinates of endpoints
if len(sorted_contours) == 0:
print "No contours found, skipping"
return None
# get points for the longest contours
mask = numpy.zeros(im.shape,numpy.uint8)
cv2.drawContours(mask,[sorted_contours[0]],0,255,-1)
pixelpoints = numpy.transpose(numpy.nonzero(mask))
xTop_one = pixelpoints[0][1] # IMPORTANT: pixelpoints is returned in row, column format
xBottom_one = pixelpoints[-1][1] ## IMPORTANT TODO: assumes points are returned sorted, need to verify
if len(sorted_contours) > 1: # we have more than one contour
mask = numpy.zeros(im.shape,numpy.uint8)
cv2.drawContours(mask,[sorted_contours[1]],0,255,-1)
pixelpoints = numpy.transpose(numpy.nonzero(mask))
xTop_two = pixelpoints[0][1] # IMPORTANT: pixelpoints is returned in row, column format
xBottom_two = pixelpoints[-1][1] ## IMPORTANT TODO: assumes points are returned sorted, need to verify
# average two longest contours if available
if len(sorted_contours) == 1:
xTop = xTop_one
xBottom = xBottom_one
else:
xTop = (xTop_one + xTop_two) / 2
xBottom = (xBottom_one + xBottom_two) / 2
### Calculate offset to return
### (XTop - XBottom) + (XTop - CENTER)
### CENTER = TRUE_CENTER - CENTER_OFFSET
MOST_POSITIVE_VAL = 3*len(im[0])/2 + CENTER_OFFSET
MOST_NEGATIVE_VAL = -3*len(im[0])/2 + CENTER_OFFSET
adjusted_midpoint = len(im[0])/2 - CENTER_OFFSET
#unscaled_error = xTop - xBottom + 2*(xTop - adjusted_midpoint)
unscaled_error = xTop - adjusted_midpoint
if unscaled_error == 0:
return 0.0
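    # scale the pixel offset into the [-1, 1] range and clamp; positive means the line sits to the right of the adjusted centre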
if unscaled_error > 0:
scaled_error = float(unscaled_error)/MOST_POSITIVE_VAL
if abs(scaled_error) > 1.0:
            print "Warning: scaled_error value greater than 1.0: " + str(scaled_error)
return min(scaled_error, 1.0)
else:
scaled_error = float(unscaled_error)/abs(MOST_NEGATIVE_VAL)
if abs(scaled_error) > 1.0:
            print "Warning: scaled_error value less than -1.0: " + str(scaled_error)
return max(scaled_error, -1.0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_linear_track_error(self):\r\n return self._arm.get_linear_track_error()",
"def error(line, data): # error function\n # Metric: Sum of squared Y-axis differences\n err = np.sum((data[:, 1] - (line[0] * data[:, 0] + line[1])) ** 2)\n return err",
"def error(self) -> Sequence[float]:\n errors = []\n for line, sign in zip(self.marker_lines, (-1, 1)):\n if self._orientation == Orientation.UP_DOWN:\n picket_pos = self._fit(line.center.y)\n mlc_pos = line.center.x\n else:\n picket_pos = self._fit(line.center.x)\n mlc_pos = line.center.y\n if (\n self._separate_leaves\n ): # offset the picket position by the DLG and nominal gap\n mag_factor = self._image.sid / 1000\n picket_pos += (\n sign * self._nominal_gap_mm * mag_factor / 2 * self._image.dpmm\n )\n errors.append((mlc_pos - picket_pos) / self._image.dpmm)\n return errors",
"def compute_speed_and_line_error(current_frame, scanner, code): \n next_direction = get_next_direction(current_frame, scanner, code)\n if next_direction == 'STOP':\n return STOP\n\n line_error = get_line_error(current_frame)\n if line_error is None:\n return None\n\n return (2, line_error)",
"def _getErrorFunction(self):\n\n\t\treturn (self._setpoint - self._current)",
"def snapFrame(camera):\n return camera.read()[1]",
"def get_error(self, camera: int = 0) -> str:\n return self.sources[camera].getError()",
"def process_pipeline(frame, keep_state=True):\n\n global line_lt, line_rt, processed_frames\n\n # undistort the image using coefficients found in calibration\n undistorted_img = undistort(frame, mtx, dist)\n\n # binarize the frame and highlight lane lines\n binarized_img = binarize(undistorted_img)\n\n # perspective transform to obtain bird's eye view\n birdeye_img, matrix, inversed_matrix = birdeye(binarized_img, visualise=False)\n\n # 2 order polynomial curve fit onto lane lines found\n if processed_frames > 0 and keep_state and line_lt.detected and line_rt.detected:\n find_lane_by_previous_fits(birdeye_img, line_lt, line_rt, visualise=False)\n else:\n find_lane_by_sliding_windows(birdeye_img, line_lt, line_rt, n_windows=9, visualise=False)\n\n # compute offset in meter from center of the lane\n offset_meter = offset_from_lane_center(line_lt, line_rt, frame_width=frame.shape[1])\n\n # draw the surface enclosed by lane lines back onto the original frame\n blend_on_road = draw_back_onto_the_road(undistorted_img, inversed_matrix, line_lt, line_rt, keep_state)\n mean_curvature_meter = np.mean([line_lt.curvature_meter, line_rt.curvature_meter])\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(blend_on_road, 'Curvature radius: {:.02f}m'.format(mean_curvature_meter), (60, 60), font, 1,\n (255, 255, 255), 2)\n cv2.putText(blend_on_road, 'Offset from center: {:.02f}m'.format(offset_meter), (60, 90), font, 1,\n (255, 255, 255), 2)\n\n processed_frames += 1\n\n return blend_on_road",
"def mag_err(self):\n return self.photosamplers.get_estimate(mag=True)[1:]",
"def average_slope_intercept(frame, line_segments):\n try:\n lane_lines = []\n height, width = frame.shape\n left_fit = []\n right_fit = []\n Ys = []\n cords = []\n ml = 0\n mr = 0\n boundary = 1 / 2\n left_region_boundary = width * (1 - boundary) # left lane line segment should be on left 2/3 of the screen\n right_region_boundary = width * boundary # right lane line segment should be on right 1/3 of the screen\n for line_segment in line_segments:\n for x1, y1, x2, y2 in line_segment:\n if x1 == x2:\n continue\n Ys += [y1, y2]\n min_y = min(Ys)\n max_y = 700\n fit = np.polyfit((x1, x2), (y1, y2), 1)\n slope = fit[0]\n intercept = fit[1]\n if slope < 0:\n if x1 < left_region_boundary and x2 < left_region_boundary:\n left_fit.append((slope, intercept))\n else:\n if x1 > right_region_boundary and x2 > right_region_boundary:\n right_fit.append((slope, intercept))\n\n left_fit_average = np.average(left_fit, axis=0)\n if len(left_fit) > 0:\n x1 = (min_y - left_fit_average[1]) / left_fit_average[0]\n x2 = (max_y - left_fit_average[1]) / left_fit_average[0]\n cords.append([[int(x1), int(min_y), int(x2), int(max_y)]])\n ml = 1\n else:\n ml = 0\n\n right_fit_average = np.average(right_fit, axis=0)\n if len(right_fit) > 0:\n x1 = (min_y - right_fit_average[1]) / right_fit_average[0]\n x2 = (max_y - right_fit_average[1]) / right_fit_average[0]\n cords.append([[int(x1), int(min_y), int(x2), int(max_y)]])\n mr = 1\n else:\n mr = 0\n\n # print(ml, mr)\n return cords, ml, mr\n except:\n return 0, 0, 0",
"def average_slope_intercept(frame, line_segments):\r\n lane_lines = []\r\n if line_segments is None:\r\n #logging.info('No line_segment segments detected')\r\n return lane_lines\r\n\r\n height, width, _ = frame.shape\r\n left_fit = []\r\n right_fit = []\r\n\r\n boundary = 1/3\r\n left_region_boundary = width * (1 - boundary) # left lane line segment should be on left 2/3 of the screen\r\n right_region_boundary = width * boundary # right lane line segment should be on left 2/3 of the screen\r\n\r\n for line_segment in line_segments:\r\n for x1, y1, x2, y2 in line_segment:\r\n if x1 == x2:\r\n #logging.info('skipping vertical line segment (slope=inf): %s' % line_segment)\r\n continue\r\n fit = np.polyfit((x1, x2), (y1, y2), 1)\r\n slope = fit[0]\r\n intercept = fit[1]\r\n if slope < 0:\r\n if x1 < left_region_boundary and x2 < left_region_boundary:\r\n left_fit.append((slope, intercept))\r\n else:\r\n if x1 > right_region_boundary and x2 > right_region_boundary:\r\n right_fit.append((slope, intercept))\r\n\r\n left_fit_average = np.average(left_fit, axis=0)\r\n if len(left_fit) > 0:\r\n lane_lines.append(make_points(frame, left_fit_average))\r\n\r\n right_fit_average = np.average(right_fit, axis=0)\r\n if len(right_fit) > 0:\r\n lane_lines.append(make_points(frame, right_fit_average))\r\n\r\n #logging.debug('lane lines: %s' % lane_lines) # [[[316, 720, 484, 432]], [[1009, 720, 718, 432]]]\r\n\r\n return lane_lines",
"def accr_lum(L_line, tracer, L_line_err = 0*u.W):\n \n a, a_err, b, b_err = rel['a'][tracer],rel['a_err'][tracer],rel['b'][tracer],rel['b_err'][tracer]\n \n log_L_acc = b + a * log10(L_line*u.W/L_sun)\n \n L_acc = 10**log_L_acc*L_sun/u.W\n \n #error propagation\n \n #c_err = (L_line_err)/(log(10) * L_line)\n #ac_err = a * log10(L_line/L_sun) * ((a_err/a)**2 + (c_err/log10(L_line/L_sun))**2)**0.5\n #log_L_acc_err = (b_err**2 + ac_err**2)**0.5\n #L_acc_err = L_acc * log(10) * log_L_acc_err\n\n return L_acc",
"def calculate_error(self):\n \n delta = self.Y - self.T\n error = delta.dot(delta) / self.N\n error = format(error, '.5f')\n \n self.errors.append(error)",
"def get_error(intercept, slope, points):\n error_value = 0\n for i in range(0, len(points)):\n error_value += (points[i].y - (slope * points[i].x + intercept)) ** 2\n return error_value / float(len(points))",
"def get_ft_sensor_error(self):\r\n return self._arm.get_ft_sensor_error()",
"def get_error(self, params):\n return self.endog - self.predict(params)",
"def validate_timecode_input(self):\n frame = self.file_buffer.get_image(self.frame_offset)\n try:\n test = frame.shape\n except Exception as e:\n print(e)\n return False\n else:\n return True\n finally:\n test = None\n frame = None",
"def getlineno(frame):\r\n # FrameType.f_lineno is now a descriptor that grovels co_lnotab\r\n return frame.f_lineno",
"def get_real_frame(self, frame: int) -> Tuple[bool, int, int]:\n # if we use half resolution -> the frame is correct as it is\n if self.half_resolution:\n return True, frame, -1\n\n # Even -> the frame info is not contained in the dataset\n if (frame % 2) == 0:\n real_frame = frame//2\n return False, real_frame, real_frame+1\n else: # Odd -> return real frame\n return True, frame//2 + 1, -1",
"def line(self) -> int:",
"def diff_of_errors(self):\n self.e_of_e = self.azimuth_error - self.altitude_error\n return self.e_of_e",
"def transform_line_to_scanner_frame(line, x, tf_base_to_camera, compute_jacobian=True):\n alpha, r = line\n\n ########## Code starts here ##########\n # TODO: Compute h, Hx\n # HINT: Calculate the pose of the camera in the world frame (x_cam, y_cam, th_cam), a rotation matrix may be useful.\n # HINT: To compute line parameters in the camera frame h = (alpha_in_cam, r_in_cam), \n # draw a diagram with a line parameterized by (alpha,r) in the world frame and \n # a camera frame with origin at x_cam, y_cam rotated by th_cam wrt to the world frame\n # HINT: What is the projection of the camera location (x_cam, y_cam) on the line r? \n # HINT: To find Hx, write h in terms of the pose of the base in world frame (x_base, y_base, th_base)\n\n\n ########## Code ends here ##########\n\n if not compute_jacobian:\n return h\n\n return h, Hx",
"def error(Y, X):\n return (Y - X) ** 2",
"def clean_linear_track_error(self):\r\n return self._arm.clean_linear_track_error()",
"def __init__(self, roi_warped_points):\n\n # was the line detected in the last iteration?\n self.detected = False\n # x values of the last n fits of the line\n self.recent_xfitted = []\n #average x values of the fitted line over the last n iterations\n self.bestx = None\n #polynomial coefficients averaged over the last n iterations\n self.best_fit = [np.array([False])]\n #polinomial coefficients for the last n fits of the lane\n self.recent_fit = []\n #polynomial coefficients for the most recent fit\n self.current_fit = [np.array([False])]\n #radius of curvature of the line in some units\n self.radius_of_curvature = 0\n #distance in meters of vehicle center from the line\n self.line_base_pos = 0\n #difference in fit coefficients between last and new fits\n self.diffs = np.array([0,0,0], dtype='float')\n #x values for detected line pixels\n self.allx = None\n #maximum number of iterations to average\n self.max_n = 10 #25\n\n # roi image points in bird's view space\n self.roi_warped_points = roi_warped_points\n\n #y values for detected line pixels\n self.ally = np.linspace(0, self.roi_warped_points[2][1] - 1, self.roi_warped_points[2][1])\n\n # line base pos is calculated through the roi information\n # the used four point ROI has two points at the bottom that are straight\n # with respect to the bottom - as this points are right next to the lines,\n # they can be translated from pixels into meters with the knowledge of\n # a U.S. highway standard lane - this is an apprximation, but should be\n # good enough for this project\n # U.S. regulations minimum lane width: 3.7m\n self.xm_per_pix = 3.7 / (self.roi_warped_points[1][0] - self.roi_warped_points[0][0])\n\n # each dashed line is 3m long --> about 33m for warped image\n self.ym_per_pix = 33 / (self.roi_warped_points[2][1] - self.roi_warped_points[0][1])",
"def LineDetection(image,color,colorformat=\"rgb\",nbPoints=20):\n\n # Shape of the image\n height = image.shape[0]\n width = image.shape[1]\n\n # Initialization of point list\n points = [(0,0)]\n\n # Color choise\n if color == 'BLACK' or color == 'black':\n color = BLACK\n elif color == 'WHITE' or color == 'white':\n color = WHITE\n elif color == 'RED' or color == 'red':\n color = RED\n elif color == 'GREEN' or color == 'green':\n color = GREEN\n elif color == 'BLUE' or color == 'blue':\n color = BLUE \n elif color == 'YELLOW' or color == 'yellow':\n color = YELLOW\n elif color == 'ORANGE' or color == 'orange':\n color = ORANGE \n else :\n color = np.fliplr(np.uint8(color)) # RGB to BGR convertion\n\n if colorformat == 'HSV' or colorformat == 'hsv':\n color = np.fliplr(color) # BGR to RGB convertion for hsv conversion\n color = cv2.cvtColor(np.array([color]), cv2.COLOR_BGR2HSV)[0]\n\n # Moment calculation,for nbPoints strip, of the mask to find the center of the color\n for i in range(height//nbPoints,height,height//nbPoints):\n strip = image[i-height//nbPoints:i]\n mask = cv2.inRange(strip,color[0],color[1])\n M = cv2.moments(mask)\n if M['m00'] > 0:\n cx = int (M[ 'm10' ] /M[ 'm00' ] )\n cy = int (M[ 'm01' ] /M[ 'm00' ] )\n points.append((cx,cy+i-height//nbPoints))\n\n return points[::-1]\t# Return reverse list",
"def calculate_error(self, estimated_x, estimated_y):\n\n return np.sqrt((self.ball_x - estimated_x) ** 2 + (self.ball_y - estimated_y) ** 2)",
"def getFrame(this, error=3, **kargs):\n\t\tthis.checkInit()\n\t\t\n\t\t# Arguments\n\t\terror = kargs.get('error', 3)\n\t\tnoReset = kargs.get('noReset', False)\n\t\t\n\t\t# \"\"\"\n\t\twhile error >= 0:\n\t\t\tret, frame = this._CAP.read()\n\t\t\tif ret:\n\t\t\t\ta = this._BAND.x * height(frame)\n\t\t\t\tb = this._BAND.y * height(frame)\n\t\t\t\tframe = frame[a:b:this._RES,:,:]\n\t\t\t\tif this._KERNEL is not None: # On applique un flou uniquement pour lisser le bruit\n\t\t\t\t\tframe = cv2.filter2D(frame, -1, this._KERNEL)\n\t\t\t\tthis._FRAME = this.onFrameGet(frame)\n\t\t\t\tbreak #bye\n\t\t\t\n\t\t\t# On a pas eu d'image...\n\t\t\telse: error -= 1\n\t\t\t\n\t\t# On doit reset ou pas ?\n\t\tif not noReset: this.resetBin()\n\t\treturn ret",
"def receptive_field(self):\n frames = 0\n for f in self.pad:\n frames += f\n return 1 + 2 * frames",
"def receptive_field(self):\n frames = 0\n for f in self.pad:\n frames += f\n return 1 + 2 * frames"
] | [
"0.6348194",
"0.6194607",
"0.6192224",
"0.5972827",
"0.5821756",
"0.58052456",
"0.57963437",
"0.5735557",
"0.5705652",
"0.5681445",
"0.5633082",
"0.5596675",
"0.55598956",
"0.5546809",
"0.5519722",
"0.54857236",
"0.5475193",
"0.54616123",
"0.5458455",
"0.54289025",
"0.5419931",
"0.5387738",
"0.5363272",
"0.5333314",
"0.53309727",
"0.52914506",
"0.52760273",
"0.52629334",
"0.52539814",
"0.52539814"
] | 0.67456424 | 0 |
Given a frame from the camera, figure out the desired speed and current line error | def compute_speed_and_line_error(current_frame, scanner, code):
next_direction = get_next_direction(current_frame, scanner, code)
if next_direction == 'STOP':
return STOP
line_error = get_line_error(current_frame)
if line_error is None:
return None
return (2, line_error) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calc_acc_frame(velocity, step_size, frame, vel_start_frame):\n #The offset required due to the velocities starting a vel_start_frame\n acc_offset = frame - vel_start_frame + 1\n if ((acc_offset) < step_size):\n raise IndexError(\"Acceleration cannot be calculated for this frame\")\n else:\n try:\n acc = (velocity[acc_offset - 1] - velocity[acc_offset - 1 - step_size]) / step_size\n return acc\n #return round(acc,2)\n except IndexError:\n print(\"Frame or step_size out of bounds\")",
"def calc_vel_frame(position, step_size, frame):\n if (frame < step_size):\n raise IndexError(\"Frame must be greater than step size\")\n else:\n try:\n vel = (position[frame - 1] - position[frame - 1 - step_size]) / step_size\n return vel\n except IndexError:\n print(\"Frame or step_size out of bounds\")",
"def get_state(self, frames=3):\n if frames<2:\n raise ValueError('Needs at least 2 frames to determine velocity')\n self.flush_buffer()\n start_t = time.time()\n \n # time this to make sure we aren't blocking on get_pos for too long\n puck_history = []\n time_history = []\n p_pos, p_vel, p_pos_test = [0,0], [0,0], [0,0]\n s1_pos, s2_pos = [0,0], [0,0]\n for i in range(frames):\n _, frame = self.cam.read()\n t = time.time()-start_t\n p = self.get_pos(frame)\n \n if p[0] is not None:\n puck_history.append(p[0])\n time_history.append(t)\n # choose last nonzero striker locations\n if p[0] is not None:\n p_pos_test = p[0]\n if p[1] is not None:\n s1_pos = p[1]\n if p[2] is not None:\n s2_pos = p[2]\n \n # estimate puck position at current time\n if len(puck_history)==0:\n pass\n elif len(puck_history)==1:\n p_pos = puck_history[0]\n else:\n # do linear regression\n a = np.array([[t,1] for t in time_history])\n b = np.array(puck_history)\n m = np.linalg.lstsq(a, b, rcond=None)[0]\n \n t = np.array([[time.time()-start_t, 1]])\n p_pos = np.dot(t,m)[0]\n p_vel = m[:,0]\n \n return np.array([p_pos, p_vel, s1_pos, s2_pos])",
"def process_pipeline(frame, keep_state=True):\n\n global line_lt, line_rt, processed_frames\n\n # undistort the image using coefficients found in calibration\n undistorted_img = undistort(frame, mtx, dist)\n\n # binarize the frame and highlight lane lines\n binarized_img = binarize(undistorted_img)\n\n # perspective transform to obtain bird's eye view\n birdeye_img, matrix, inversed_matrix = birdeye(binarized_img, visualise=False)\n\n # 2 order polynomial curve fit onto lane lines found\n if processed_frames > 0 and keep_state and line_lt.detected and line_rt.detected:\n find_lane_by_previous_fits(birdeye_img, line_lt, line_rt, visualise=False)\n else:\n find_lane_by_sliding_windows(birdeye_img, line_lt, line_rt, n_windows=9, visualise=False)\n\n # compute offset in meter from center of the lane\n offset_meter = offset_from_lane_center(line_lt, line_rt, frame_width=frame.shape[1])\n\n # draw the surface enclosed by lane lines back onto the original frame\n blend_on_road = draw_back_onto_the_road(undistorted_img, inversed_matrix, line_lt, line_rt, keep_state)\n mean_curvature_meter = np.mean([line_lt.curvature_meter, line_rt.curvature_meter])\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(blend_on_road, 'Curvature radius: {:.02f}m'.format(mean_curvature_meter), (60, 60), font, 1,\n (255, 255, 255), 2)\n cv2.putText(blend_on_road, 'Offset from center: {:.02f}m'.format(offset_meter), (60, 90), font, 1,\n (255, 255, 255), 2)\n\n processed_frames += 1\n\n return blend_on_road",
"def calc_avg_vel_frame(position, step_size, frame, avg_quantity):\n avg_disp = int(math.floor(avg_quantity / 2))\n\n if (frame < (step_size + avg_disp)):\n raise IndexError(\"Can not calculate for this frame\")\n else:\n try:\n position_avg = 0\n for i in range(frame - 1 - avg_disp, frame + avg_disp):\n position_avg += position[i]\n position_1 = position_avg / (avg_disp * 2 + 1)\n \n position_avg = 0\n for i in range(frame - 1 - avg_disp - step_size, frame + avg_disp - step_size):\n position_avg += position[i]\n position_2 = position_avg / (avg_disp * 2 + 1)\n\n vel = (position_1 - position_2) / step_size\n return vel\n #return round(vel, 2)\n except IndexError:\n print(\"Frame or step_size out of bounds\")",
"def fps(self):\n\t\treturn float(len(self.buf)) / (self.buf[-1][0] - self.buf[0][0])",
"def fps(x, y, i):\n\n # Special case for the edges.\n if i < 2:\n return (y[i+1] - y[i]) / (x[i+1] - x[i])\n elif i > len(x) - 3:\n return (y[i] - y[i-1]) / (x[i] - x[i-1])\n\n else:\n h = x[i] - x[i-1]\n f0 = y[i]\n f1 = y[i+1]\n f2 = y[i+2]\n f3 = y[i-1]\n f4 = y[i-2]\n return (-f2 + 8*f1 - 8*f3 + f4) / (12 * h)",
"def calculate_speed(centre, prev_centre, time_step):\n if time_step != 0:\n y = centre[1] - prev_centre[1]\n x = centre[0] - prev_centre[0]\n return round(math.hypot(x, y) / (time_step * FRAME_W), 2)\n else:\n return 0",
"def calculate_fps(self):\n time_difference = self.time_array[-1] - self.time_array[0]\n time_difference_in_seconds = time_difference.to_sec()\n if time_difference_in_seconds == 0:\n pass\n self.fps = self.buffer_size / time_difference_in_seconds\n rospy.loginfo(\"[EulerianMotionMagnification] Estimated FPS: \" + str(self.fps) + \" (Measured timespan: \" + str(time_difference_in_seconds) + \"s)\")\n rospy.loginfo(\"[EulerianMotionMagnification] Video array length: \" + str(len(self.video_array)))",
"def motion_extraction():\n # iterate through frames\n global frame_height, frame_width\n global limb_coords, init_coords\n frame_count = 0\n has_frames, frame = capture.read()\n\n while has_frames:\n img_out = frame.copy()\n img_out = insert_padding(img_out, 14*14, 12*14)\n\n if frame_count == 0:\n # change global values of height and width\n frame_height = frame_height + 14*14*2\n frame_width = frame_width + 12*14*2\n get_start_positions(img_out)\n img_out2 = segment_red(img_out, 200, 130)\n #erode(img_out2, 4, 6)\n remove_artifacts(img_out2)\n #enhance_contrast(img_out2)\n\n if frame_count > 0:\n get_motion(prev_frame, img_out2, frame_count)\n\n prev_frame = img_out2.copy()\n frame_count += 1\n has_frames, frame = capture.read()",
"def get_speed(self) -> float: \r\n if self.distance < self.distance_stop:\r\n print(\"STOP: Obstacle detected ({} cm)\".format(self.distance))\r\n return 0\r\n elif self.distance < self.distance_slow: \r\n return self.speed * 0.8\r\n else:\r\n return self.speed",
"def emvCoreMotion(frames, fps, maxLevel, freqLow, freqHigh, alpha, lambdaC, chromAttenuation, method=\"ideal\"): \n pyrVideo_=buildVideoLapPyr(frames, maxLevel)\n \n if method==\"ideal\":\n filteredVideoPyr=idealFilterForVideoPyr(pyrVideo_, freqLow, freqHigh, fps)\n elif method==\"butt\":\n filteredVideoPyr=buttFilterForVideoPyr(pyrVideo_, freqLow, freqHigh, fps)\n \n amplifiedPyr = amplifyTemporalMotionSignal(pyrVideo_, filteredVideoPyr, alpha, lambdaC, chromAttenuation)\n recreateFrames=recreateVideoFromLapPyr(amplifiedPyr)\n \n return recreateFrames",
"def snapFrame(camera):\n return camera.read()[1]",
"def Motion_estimate_compute_1frame(ref1_frame,ref2_frame,target_frame,block_size):\n\n ref_Y, ref_U, ref_V = [], [], []\n for ref_frame in [ref1_frame,ref2_frame]:\n ref_Y.append(np.array(ref_frame[ :sep1]).reshape(height,width))\n ref_U.append(np.array(ref_frame[sep1:sep2]).reshape(height//2,width//2))\n ref_V.append(np.array(ref_frame[sep2: ]).reshape(height//2,width//2))\n \n tar_Y = target_frame[ :sep1].reshape(height,width)\n tar_U = target_frame[sep1:sep2].reshape(height//2,width//2)\n tar_V = target_frame[sep2: ].reshape(height//2,width//2)\n \n err_Y = np.array(tar_Y)\n err_U = np.array(tar_U)\n err_V = np.array(tar_V)\n vect_field = np.zeros((height//block_size,width//block_size,3),dtype=int)\n \n for X in range(0,height//block_size):\n for Y in range(0,width//block_size):\n xa, xz = X*block_size,(X+1)*block_size\n ya, yz = Y*block_size,(Y+1)*block_size\n # Find the motion vector for the block XY\n \n ref,vx,vy = Motion_estimate_compute_P_1block(ref_Y[0],ref_Y[1],\n tar_Y[xa:xz,ya:yz],\n [xa,ya])\n \n vect_field[X,Y,:] = np.array([ref,vx,vy])\n \n pxa, pxz = xa+vx,xz+vx\n pya, pyz = ya+vy,yz+vy\n \n patch_Y = ref_Y[ref][pxa:pxz,pya:pyz]\n patch_U = ref_U[ref][pxa//2:pxz//2,pya//2:pyz//2]\n patch_V = ref_V[ref][pxa//2:pxz//2,pya//2:pyz//2]\n \n err_Y[xa:xz,ya:yz] -= patch_Y\n err_U[xa//2:xz//2,ya//2:yz//2] -= patch_U\n err_V[xa//2:xz//2,ya//2:yz//2] -= patch_V\n \n frame_error = np.concatenate((err_Y.flatten(),\n err_U.flatten(),\n err_V.flatten()))\n dct_error = DCT_compute(frame_error,offset=0,Q='opti') # Error -> mean = 0\n # -> offset =0\n \n P_frame = np.concatenate((vect_field.flatten(),dct_error.flatten()))\n \n return P_frame",
"def ComputeLightTravelTime(Det1Pos, Det2Pos):\n\n # Get relative position vector\n Det21Pos = Det2Pos - Det1Pos\n \n # Dot difference vector into itself to get magnitude of detector separation\n dist = np.sqrt(np.dot(Det21Pos,Det21Pos))\n\n # Normalise with speed of light\n travelTime = dist/c\n\n return travelTime",
"def get_linear_track_error(self):\r\n return self._arm.get_linear_track_error()",
"def __init__(self):\n self.active = True # Camera activation control\n self.stream = cv2.VideoCapture(0) # Open video stream\n while not self.stream.isOpened():\n pass\n _,self.image = self.stream.read()# Save the first frame\n cv2.waitKey(10)\n self.frame = self.image[196:304,:546,:]# Cropped frame\n self.diff_frame = self.frame\n# self.reference_frame = copy.deepcopy(self.frame)\n# self.abs_diff_frame = copy.deepcopy(self.frame)\n self.reference_frame = self.frame\n self.abs_diff_frame = self.frame\n self.frame_count = 1 # Used for framerate estimation\n self.frame_rate = 0\n self.tic = time()",
"def main():\n \n #\n # Initialization\n #\n ref_time = time.time()\n output_string = '' \n cv2.namedWindow('frame', cv2.WINDOW_GUI_NORMAL+cv2.WINDOW_AUTOSIZE)\n \n #\n # Open the capture device and print some\n # useful properties\n #\n cap = cv2.VideoCapture(0)\n if cap.isOpened():\n #cap.set(cv.CV_CAP_PROP_FRAME_WIDTH, 320)\n #cap.set(cv.CV_CAP_PROP_FRAME_HEIGHT, 240)\n \n frameWidth = cap.get(cv.CV_CAP_PROP_FRAME_WIDTH)\n frameHeight = cap.get(cv.CV_CAP_PROP_FRAME_HEIGHT)\n \n print 'frame: width {}, height {}'.format(frameWidth, frameHeight)\n\n #\n # Parameters for Lucas-Kanade optical flow\n #\n lk_params = dict( winSize = (15,15),\n maxLevel = 2,\n criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n\n #\n # Predefine points to track\n #\n track_points = np.array([[[220.0, 120.0]],\n [[220.0, 200.0]],\n [[220.0, 280.0]],\n [[220.0, 360.0]],\n [[420.0, 120.0]],\n [[420.0, 200.0]],\n [[420.0, 280.0]],\n [[420.0, 360.0]]], 'float32')\n \n #\n # Take first frame and find corners in it\n #\n cap_ok, frame = cap.read()\n if not cap_ok:\n sys.exit()\n\n prev_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n print 'rel_time,p0dx,p0dy,p1dx,p1dy,p2dx,p2dy,p3dx,p3dy,p4dx,p4dy,p5dx,p5dy,p6dx,p6dy,p7dx,p7dy'\n\n while(True):\n\n cap_ok, frame = cap.read()\n if not cap_ok:\n break\n \n curr_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n #\n # Calculate optical flow\n #\n next_points, st, err = cv2.calcOpticalFlowPyrLK(prev_frame, curr_frame, track_points, None, **lk_params)\n\n #\n # Iterate through points and display on video frame\n # as well as output a CSV formated value list\n #\n for point_index in range(0, track_points.shape[0]):\n \n #\n # Display results on video frame\n #\n track_point = np.int0(track_points[point_index])\n x0,y0 = track_point.ravel()\n cv2.circle(frame, (x0,y0), 5, (0,255,0), -1)\n\n next_point = np.int0(next_points[point_index])\n x1,y1 = next_point.ravel()\n cv2.circle(frame, (x1,y1), 5, (0,0,255), -1)\n\n #\n # Build CSV string\n #\n output_string += ',{:.2f},{:.2f}'.format(x0-x1, y0-y1)\n \n #\n # Print out some data in a CSV format for graphing\n #\n now = time.time() - ref_time \n print '{:.2f}{}'.format(now, output_string)\n output_string = ''\n\n #\n # Display result and check for escape key\n #\n cv2.imshow('frame',frame)\n k = cv2.waitKey(1) & 0xff\n if k == 27:\n break\n\n #\n # Now update the previous frame and previous points\n #\n prev_frame = curr_frame.copy()\n\n cv2.destroyAllWindows()\n cap.release()",
"def calculate_frame(self):\n frame = self.stream.read()\n self.keypoints, self.image = self.openpose.forward(frame, True)",
"def main():\n NAME = os.path.basename(__file__).split(\".\")[0]\n\n # Pass/fail thresholds\n MIN_AVG_FRAME_DELTA = 30 # at least 30ms delta between frames\n MAX_VAR_FRAME_DELTA = 0.01 # variance of frame deltas\n MAX_FRAME_DELTA_JITTER = 0.3 # max ms gap from the average frame delta\n\n with its.device.ItsSession() as cam:\n props = cam.get_camera_properties()\n if not its.caps.manual_sensor(props):\n print \"Test skipped\"\n return\n\n req, fmt = its.objects.get_fastest_manual_capture_settings(props)\n caps = cam.do_capture([req]*50, [fmt])\n\n # Print out the millisecond delta between the start of each exposure\n tstamps = [c['metadata']['android.sensor.timestamp'] for c in caps]\n deltas = [tstamps[i]-tstamps[i-1] for i in range(1,len(tstamps))]\n deltas_ms = [d/1000000.0 for d in deltas]\n avg = sum(deltas_ms) / len(deltas_ms)\n var = sum([d*d for d in deltas_ms]) / len(deltas_ms) - avg * avg\n range0 = min(deltas_ms) - avg\n range1 = max(deltas_ms) - avg\n print \"Average:\", avg\n print \"Variance:\", var\n print \"Jitter range:\", range0, \"to\", range1\n\n # Draw a plot.\n pylab.plot(range(len(deltas_ms)), deltas_ms)\n matplotlib.pyplot.savefig(\"%s_deltas.png\" % (NAME))\n\n # Test for pass/fail.\n assert(avg > MIN_AVG_FRAME_DELTA)\n assert(var < MAX_VAR_FRAME_DELTA)\n assert(abs(range0) < MAX_FRAME_DELTA_JITTER)\n assert(abs(range1) < MAX_FRAME_DELTA_JITTER)",
"def speed(self):\n return sqrt(self.velocity_x ** 2 + self.velocity_y ** 2)",
"def get_motion(frame1k, frame2k, frame_count):\n frame1 = frame1k.copy()\n frame2 = frame2k.copy()\n\n global limb_coords, init_coords, num_blocks\n cv2.imwrite(\"thisImageAnalyse.png\", frame2)\n block_size = 3\n block_rad = int(block_size/2)\n\n def get_SSD():\n \"\"\" applies SSD formula to search area\n :return SSD value\"\"\"\n dist = 0\n # traversal of pixels in potential Bi+1 block\n # compare corresponding pixel positions with source block in f1 and neighbour block in f2\n y1 = center_y1 - block_rad # start pos.\n for y2 in range(center_y2 - block_rad, (center_y2 - block_rad + block_size)):\n x1 = center_x1 - block_rad # start pos\n for x2 in range(center_x2 - block_rad, (center_x2 - block_rad + block_size)):\n try:\n # displacement formula for RGB channels of each pixel in block\n dist = dist + (frame1[y1][x1][0] - frame2[y2][x2][0])**2 + (frame1[y1][x1][1] - frame2[y2][x2][1])**2 + (frame1[y1][x1][2] - frame2[y2][x2][2])**2\n except RuntimeWarning:\n pass\n x1 += 1\n y1 += 1\n return math.sqrt(dist)\n\n # for each body part\n b = 0\n while b < 5:\n avg_x = 0.0\n avg_y = 0.0\n new_x = 0.0\n new_y = 0.0\n a = 0\n # for each block on body part (9 total)\n while a < num_blocks:\n found = False\n search_rad = 5\n while found is False:\n center_y1 = int(init_coords[b][a][0])\n center_x1 = int(init_coords[b][a][1])\n min_SSD = 999999\n # for pythagoras to ensure closest block gets picked when equality occurs of SSD value\n min_d = 999999\n # this finds the center of the block to compare\n for factor_y in range(-search_rad, search_rad + 1):\n center_y2 = center_y1 + block_size*factor_y\n y_dist = center_y1 - abs(center_y2)\n for factor_x in range(-search_rad, search_rad + 1):\n center_x2 = center_x1 + block_size*factor_x\n x_dist = center_x1 - abs(center_x2)\n # pythagoras\n d = math.sqrt((y_dist**2 + x_dist**2))\n if d < min_d:\n min_d = d\n\n SSD = get_SSD()\n if frame2[center_y2][center_x2][1] != 0 and frame2[center_y2][center_x2][2] != 0:\n found = True\n if SSD < min_SSD:\n min_SSD = SSD\n new_y = center_y2\n new_x = center_x2\n elif SSD == min_SSD and d < min_d:\n new_y = center_y2\n new_x = center_x2\n if found is False:\n # if no block is found repeat the search, increasing the search size by 1\n search_rad += 1\n # draw extracted vectors\n cv2.arrowedLine(frame1k, (int(center_x1), int(center_y1)), (int(new_x), int(new_y)), (150, 200, 30), 1, 4, 0, 0.3)\n avg_x += new_x\n avg_y += new_y\n init_coords[b][a][0] = new_y\n init_coords[b][a][1] = new_x\n a += 1\n cv2.imwrite('monkeyFrames/contrast_enhanced%d.png' % frame_count, frame1k)\n limb_coords[b][frame_count][0] = int(avg_y/num_blocks)\n limb_coords[b][frame_count][1] = int(avg_x/num_blocks)\n b += 1",
"def _get_forward_speed(self):\n\n velocity = self._vehicle.get_velocity()\n transform = self._vehicle.get_transform()\n vel_np = np.array([velocity.x, velocity.y, velocity.z])\n pitch = np.deg2rad(transform.rotation.pitch)\n yaw = np.deg2rad(transform.rotation.yaw)\n orientation = np.array([np.cos(pitch) * np.cos(yaw), np.cos(pitch) * np.sin(yaw), np.sin(pitch)])\n speed = np.dot(vel_np, orientation)\n return speed",
"def get_speed(self):\r\n return self.__x_speed, self.__y_speed",
"def _speedDiff(self, position, speed, action):\n return (action/(1 + self._hill_diff(position)**2)\n - 9.81 * self._hill_diff(position) /\n (1 + self._hill_diff(position)**2)\n - ((self._hill_diff(position) * self._hill_diff_diff(position)) \n * (speed**2))/(1 + self._hill_diff(position)**2))",
"def speed(self) -> int:",
"def speed(self) -> int:",
"def receptive_field(self):\n frames = 0\n for f in self.pad:\n frames += f\n return 1 + 2 * frames",
"def receptive_field(self):\n frames = 0\n for f in self.pad:\n frames += f\n return 1 + 2 * frames",
"def get_frame_vel(self, f):\n return self._frame_vels[f, :]"
] | [
"0.6475174",
"0.63403714",
"0.5990031",
"0.5899495",
"0.5873651",
"0.58731246",
"0.5788477",
"0.57712096",
"0.57585996",
"0.5747586",
"0.57444304",
"0.56175065",
"0.56138253",
"0.5610825",
"0.56059897",
"0.55898684",
"0.55860823",
"0.5576143",
"0.5572634",
"0.5566797",
"0.5545677",
"0.5542258",
"0.54771334",
"0.54713273",
"0.5466316",
"0.54612416",
"0.54612416",
"0.5407744",
"0.5407744",
"0.5390849"
] | 0.6958017 | 0 |
Initialize a Student instance with the given name and the number of scores to associate with the given Student. | def __init__(self, name: str, number: int):
self._name = name
self._scores = [0] * number | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, name, number):\n self._name = name\n self._scores = []",
"def __init__(self, name, age, student_id, courses):\n self.name = name\n self.age = age\n self.student_id = student_id\n self.courses = courses\n\n # When adding a student, increment the\n # class variable student_count\n Student.student_count += 1",
"def __init__(self, name, number):\n self.name = name\n self.scores = []\n for count in range(number):\n self.scores.append(0)",
"def __init__(self, name, surname):\n\t\t\n\t\tself.grades = {}\n\t\tself.attendance = 0\n\t\t\n\t\tif not (isinstance(name, str) and isinstance(surname, str)):\n\t\t\tname, surname = \"None\", \"None\"\n\t\tself.name, self.surname = name, surname",
"def __init__(self, name, score):\r\n self.name = name\r\n self.score = float(score)",
"def __init__(self, name):\n self.name = name\n self.maxidy = -1\n self.studentlist = []",
"def __init__(self, name):\n\n self._name = name.strip()\n self._total_score = 0\n self._current_score = 0\n self._total_rolls = 0\n self._last_roll = 0",
"def __init__(self, name, skill):\n \n super(Student, self).__init__(name)\n self.grades = []\n self.skill = skill",
"def __init__(self, seq_name, first, last, score):\n self.sequence_name = seq_name\n self.first = int(first)\n self.last = int(last)\n self.score = int(score)",
"def __init__(self, first_name, last_name, address):\n\n self.first_name = first_name\n self.last_name = last_name\n self.address = address\n\n # Creates dictionary for each student with the label & info.\n\n self.info = {\n 'first name': self.first_name,\n 'last name': self.last_name,\n 'address': self.address,\n }",
"def __init__(self, name=\"Bob\"):\n self.name = name\n self.score = 0\n self.rolls = []",
"def add_student(self, name: str, grade: int) -> None:\n school_grade = self.students.setdefault(grade, [])\n school_grade.append(name)\n school_grade.sort()",
"def __init__(self, name, address, phone, credit_score):\r\n\r\n self.name = name\r\n self.address = address\r\n self.phone = phone\r\n self.credit_score = credit_score",
"def __init__(self, name, ssn, address=\"\"):\n self.name = name\n self._ssn = ssn\n self.set_address(address)",
"def __init__(self, student):\n pass",
"def __init__(self, \n student_id=0,\n # - Arguments from Person\n given_name=None, initials=None, family_name=None, \n email_address=None,\n # - Other student-specific arguments\n major=None, minor=None\n ):\n # - We can use super() to call the parent class' __init__ \n # because there's only one parent class...\n super().__init__(\n given_name, initials, family_name, email_address\n )\n # - But we ALSO need to initialize properties that are \n # members of THIS class\n self.student_id = student_id\n self.major = major\n self.minor = minor",
"def __init__(self,student_id,lname,fname, major='Computer Science',gpa='0.0'):\n super().__init__(lname,fname) # Call init on parent class\n self._student_id = student_id\n self._major = major\n self._gpa = gpa",
"def __init__(self, score=0):\n self.score = score",
"def __init__(self, score = 0):\n self.score = score",
"def _create_students(self, num_students):\r\n return [self.create_student('robot%d' % i) for i in xrange(num_students)]",
"def __init__(self, N, S, students, leaders):\n self.N = N\n self.S = S\n self.G = int(math.ceil(N/S))\n self.partitions = []\n self.students = students\n self.leaders = leaders",
"def __init__(self):\n self.students = [] # List of Student objects.\n self.grades = {} # Dictionary to map IDNumber -> list of grades.\n self.isSorted = True # True if self.students is sorted.",
"def __init__(self, student, major_map):\n self.student = student\n self.major_map = major_map\n\n # matches \n self.general_studies = self.__general_studies_self()\n self.subject = self.__subject_self()\n self.exact = self.__exact_self()\n self.low_elective = self.__low_elective_self()\n self.up_elective = self.__up_elective_self()\n\n # expanded matches\n self.graph = self.__build_graph()\n\n # linear optimization lookup tables\n self.classes_from , self.requirements_to = self.build_opt()\n\n # \n self.expanded = self.class_combinations()\n self.all_combos = self.get_all_combinations()\n\n self.matches = self.___matches()",
"def add_student(self, student: 'Student') -> None:\n # Add HOUSEHOLD attributes to the schools' composition\n self.total += 1\n self.composition += student.household.attributes\n self.students[student.idx] = student\n self.has_space = (self.total < self.capacity)",
"def __init__(self):\n self.students = {}",
"def _create_students_with_state(self, num_students, state=None, grade=0, max_grade=1):\r\n self.define_option_problem(PROBLEM_URL_NAME)\r\n students = [\r\n UserFactory.create(username='robot%d' % i, email='robot+test+%[email protected]' % i)\r\n for i in xrange(num_students)\r\n ]\r\n for student in students:\r\n CourseEnrollmentFactory.create(course_id=self.course.id, user=student)\r\n StudentModuleFactory.create(course_id=self.course.id,\r\n module_state_key=self.location,\r\n student=student,\r\n grade=grade,\r\n max_grade=max_grade,\r\n state=state)\r\n return students",
"def __init__(self, name, ssn, address, courses_grades=None):\n super().__init__(name, ssn, address)\n if courses_grades is None:\n courses_grades = []\n if courses_grades == isinstance(courses_grades, list):\n self.courses_grades = courses_grades\n else:\n self.courses_grades = list(courses_grades)",
"def create(cls,\n name,\n position=None,\n average=None,\n count=None,\n maximum=None,\n minimum=None,\n stddev=None,\n variance=None):\n c = cls({})\n c.apply(name, )\n return c",
"def AddStudent(self, student_name):\n self.__data['s'].AddItems([Student(self.__data['s'].GetSafeKey(), student_name)])\n self.__undo_list.append(['s'])\n self.__redo_list.clear()",
"def __init__(self, teamName):\n \n for i in range (4):\n for j in range(16):\n if (teamName == pm.R1teams[i, j]):\n self.regional, self.position = i, j\n break\n \n self.totWin = pm.totWin[self.regional][self.position]\n self.awayWin = pm.awayWin[self.regional][self.position]\n self.recency = pm.recency[self.regional][self.position]\n self.seed = pm.seed[self.position]\n self.name = pm.R1teams[self.regional][self.position]\n \n # Define Parameter Weights\n weightTotWin = 1/3\n weightAwayWin = 1/2\n weightRecency = 1/6\n \n self.score = weightTotWin*self.totWin + weightAwayWin*self.awayWin + weightRecency*self.recency"
] | [
"0.68761146",
"0.6798942",
"0.660163",
"0.65491754",
"0.6526323",
"0.64837736",
"0.64603394",
"0.64021593",
"0.6373097",
"0.61487335",
"0.59534216",
"0.5864722",
"0.58540154",
"0.5832365",
"0.5794798",
"0.5733008",
"0.5677068",
"0.5636913",
"0.56352925",
"0.5627736",
"0.55593854",
"0.55550027",
"0.5541582",
"0.5537263",
"0.55188996",
"0.55040926",
"0.5490793",
"0.5488882",
"0.54870135",
"0.54217994"
] | 0.687974 | 0 |
Calculate the average score associated with this Student. | def get_average(self) -> float:
return sum(self._scores) / len(self._scores) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getAverage(self):\n return sum(self.scores) / len(self.scores)",
"def get_mean_score(rating_scores):\n return sum(rating_scores) / len(rating_scores)",
"def get_average(self):\n self.avg = math.floor((self.maths + self.phy + self.che) / 3, )\n self.assign_grade()\n return self.avg\n # End of method get_average",
"def average(self):\n return self.summation() / self.count()",
"def _find_average_score(self, sentenceValue):\n sumValues = 0\n for entry in sentenceValue: \n sumValues += sentenceValue[entry]\n \n try:\n average = (sumValues / len(sentenceValue))\n except:\n average = 0\n return average",
"def average_grade(self):\n grade_sum = 0\n grades_length = 0\n for c in self.courses_grades:\n if c[1] != \"-\":\n grade_sum += int(c[1])\n grades_length += 1\n average = grade_sum / grades_length\n return average",
"def average_rating(self):\n return ( self.rating_1 + self.rating_2 + self.rating_3) / 3",
"def averaged_risk(self):\n return self._averaged_risk",
"def averaged_risk(self):\n return self._averaged_risk",
"def global_average_scores(self):\n\n return np.mean(self.average_scores_all_subjects(), axis=0)",
"def calc_mean_score(movies: List[Movie]) -> float:\n return round(sum([m.score for m in movies]) / len(movies), 1)",
"def average_score(self, sentenceValue):\r\n sumValues = 0\r\n for entry in sentenceValue:\r\n sumValues += sentenceValue[entry]\r\n\r\n # Average value of a sentence from original summary_text\r\n average = (sumValues / len(sentenceValue))\r\n\r\n return average",
"def average_rating(self):\n reviews = self.gamereview_set.all()\n\n try:\n return mean([ review.rating for review in reviews ])\n\n except StatisticsError:\n return None",
"def average_ratings(self):\n return get_average_rate(\n model=Rating,\n article=self.pk\n )",
"def get_average_grade_of_students(students):\n total_grade = 0\n for row in students:\n total_grade += int(row[5])\n return total_grade/len(students)",
"def get_avg_score(game_id):\r\n\r\n scores = []\r\n game = Game.query.get(game_id)\r\n for rating in game.ratings:\r\n scores.append(rating.score)\r\n \r\n avg_score = sum(scores)/len(scores)\r\n \r\n \r\n return avg_score",
"def average(self):\n s = self.sum()\n flat_shape = self.flatten_shape(self.shape)\n num_of_elements = fct.reduce(opr.mul, flat_shape, 1)\n average = s / num_of_elements\n return average",
"def average_rating(self):\n ratings = AttractionRating.objects.filter(attraction=self)\n total_rating = 0\n for rating in ratings:\n total_rating += rating.rating\n\n # If there are no rating, then we set the average to 0\n # otherwise we calculate the average\n try:\n avg = total_rating / len(ratings)\n except ZeroDivisionError:\n avg = total_rating\n\n return avg",
"def avg_e_score(self, entity):\n return float(entity['es']) / float(entity['count'])",
"def get_average_rating(self):\n count = 0\n total = 0\n ratings_length = len(self.ratings)\n if ratings_length > 0:\n for rating in self.ratings:\n count += 1\n total += rating\n average = total / count\n return average\n else:\n print(\"There does not seem to be any ratings for {book}\".format(book=self.title))",
"def average_rating(self):\n ratings = Rating.objects.filter(game=self)\n\n if len(ratings):\n # Sum all of the ratings for the game\n total_rating = 0\n for rating in ratings:\n total_rating += rating.value\n\n # Calculate the averge and return it.\n average = total_rating / len(ratings)\n return average\n\n # else: \n return 0",
"def average_rating(self):\n return ( self.rating_1 + self.rating_2 + self.rating_3 + self.rating_4 + self.rating_5 + self.rating_6 + self.rating_7) / 7",
"def average_score(sentence_scores):\r\n sumValues = 0\r\n for score in sentence_scores:\r\n sumValues += sentence_scores[score]\r\n\r\n # Average value of a sentence from original text\r\n average = (sumValues / len(sentence_scores))\r\n\r\n return average",
"def get_average(value): # fine\r\n average_assignment = 0\r\n average_exam = 0\r\n student_count = 0\r\n if value == 'Assignment':\r\n for student in StudentRoster:\r\n student_count += 1\r\n average_assignment += int(student.assignment)\r\n if student_count == 0:\r\n print(0)\r\n else:\r\n calc = average_assignment/student_count\r\n print('{:.2f}'.format(calc))\r\n elif value == 'Exam':\r\n for student in StudentRoster:\r\n student_count += 1\r\n average_exam += int(student.exam)\r\n if student_count == 0:\r\n print(0)\r\n else:\r\n calc = average_exam/student_count\r\n print('{:.2f}'.format(calc))",
"def get_averages(self):\t\n\t\t\n\t\taverages = {}\n\t\tfor subject in self.grades.iterkeys():\n\t\t\taverages[subject] = float(sum(self.grades[subject])) / len(self.grades[subject])\n\t\treturn averages",
"def getScore(self):\n return sum(self.field)",
"def get_average_rating(self):\n count = 0\n total = 0\n num_books = len(self.books)\n if num_books > 0:\n for rating in self.books.values():\n if rating:\n count += 1\n total += rating\n average = total / count\n if count > 0:\n return average\n else:\n print(\"Books with ratings not found for user {user}\".format(user=self.name))",
"def avg(self):\n return sum(self.times) / len(self.times)",
"def avg(self):\n return sum(self.times) / len(self.times)",
"def avg(self):\n return sum(self.times) / len(self.times)"
] | [
"0.79639894",
"0.73673606",
"0.7196625",
"0.71720856",
"0.7162906",
"0.71442413",
"0.70670134",
"0.7030095",
"0.7030095",
"0.6999512",
"0.69382113",
"0.69370216",
"0.6887286",
"0.68780196",
"0.6849795",
"0.6842055",
"0.6833306",
"0.6798177",
"0.6760756",
"0.6755839",
"0.67513216",
"0.6737335",
"0.67334545",
"0.6701781",
"0.6678274",
"0.6675613",
"0.6620255",
"0.6608491",
"0.6608491",
"0.6608491"
] | 0.78896713 | 1 |
Constructor creates a number with the given numerator and denominator and reduces it to lowest terms. | def __init__(self, numerator: int, denominator: int):
self._numerator = numerator
self._denominator = denominator
self._reduce() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, numerator, denominator=1):\n if (type(numerator) not in(int,float)):\n raise ValueError('Numerator must be a number')\n if (type(denominator) not in(int,float)):\n raise ValueError('Denominator must be a number')\n if denominator == 0:\n if numerator == 0:\n self.numerator = 0\n self.denominator = 1\n self.inf_size = 0\n else:\n self.numerator = 0\n self.denominator = 0\n self.inf_size = numerator\n else:\n self.inf_size = 0\n self.numerator = numerator\n self.denominator = denominator\n self.__make_denominator_integer()\n self.__make_numerator_integer()\n self.__reduce()",
"def __init__(self, numerator, denominator=1):\n gcd1 = math.gcd(numerator, denominator)\n\n if denominator < 0:\n self.numerator = -(int(numerator/gcd1))\n self.denominator = abs(int(denominator/gcd1))\n elif denominator == 0:\n self.numerator = 1\n self.denominator = 0\n else:\n self.numerator = int(numerator/gcd1)\n self.denominator = int(denominator/gcd1)",
"def __init__(self, numerator, denominator):\n if denominator == 0:\n raise ValueError(\"Denominator must be non-zero\")\n gcd = self.__gcd(numerator, denominator)\n self.numerator = numerator / gcd\n self.denominator = denominator / gcd",
"def __init__ (self,numerator,denominator=1):\n self.debug = False\n if (self.debug): print(f'enter fraction.__init__ with {numerator}, {denominator}')\n sign = int(numerator * denominator / abs(numerator * denominator))\n if (self.debug): print(f'enter sign is {sign}')\n self.value=(sign * abs(numerator),abs(denominator))\n self.simplify()",
"def __init__(self, numerator, denominator=1):\n common_div = gcd(numerator, denominator)\n if common_div == 0:\n common_div = 1\n self.numerator = int((numerator / common_div) / copysign(1, denominator))\n self.denominator = int((denominator / common_div) / copysign(1, denominator))\n if self.denominator != 0:\n self.equal = self.numerator / self.denominator\n elif self.denominator == 0:\n self.equal = \"UNDEFINED.\"",
"def __init__(self, numerator, denominator):\n\n if not isinstance(numerator, int) or not isinstance(denominator, int):\n raise TypeError\n elif denominator == 0:\n raise ValueError\n\n self.__numerator = numerator\n self.__denominator = denominator",
"def __init__(self,top,bottom):\n \n if (type(top) is not int or type(bottom) is not int):\n raise TypeError('Numerator and denominator must be integers.')\n elif bottom == 0:\n raise ZeroDivisionError('The denominator cannot be zero.')\n elif bottom < 0: #store 'minus' in the numerator of the fraction\n top *= -1\n bottom *= -1\n #make irreducible form\n self.num = top // gcd(top,bottom)\n self.den = bottom // gcd(top,bottom)",
"def __init__(self):\n self.numerator = 1\n self.denominator = 1",
"def __reduce(self):\n if self.denominator <0:\n self.denominator *= -1\n self.numerator *= -1\n gcd = math.gcd(int(self.denominator),int(self.numerator))\n if self.denominator != 0 and self.numerator!= 0:\n if gcd > 0:\n self.denominator /= gcd\n self.numerator /= gcd\n self.numerator = int(self.numerator)\n self.denominator = int(self.denominator)",
"def __init__(self,numerator,denominator):\n self.numerator = numerator\n self.denominator = denominator\n if(denominator==0):\n raise ValueError",
"def __make_numerator_integer(self):\n while self.numerator % 1 !=0:\n self.denominator *=10\n self.numerator *=10",
"def __make_denominator_integer(self):\n while self.denominator % 1 !=0:\n self.denominator *=10\n self.numerator *=10",
"def __init__(self, num, denom):\n assert type(num) == int and type(denom) == int, \"ints not used\"\n self.num = num\n self.denom = denom\n def simplify(x, y):\n \"\"\" Simplifies a fraction \"\"\"\n if x % 2 > 0:\n if y % x > 0:\n # Check Prime\n prime = check_prime(x, y)\n if prime == 0:\n return str(int(x)) + \"/\" + str(int(y))\n else:\n return simplify ((x / prime), (y / prime))\n else:\n return str(int(x/x)) + \"/\" + str(int(y/x))\n else:\n return simplify ((x / 2), (y / 2))\n def check_prime(x, y):\n \"\"\" Function used by simplify to check prime number division of num and denom \"\"\"\n pri = (3,5,7,11,13,17,19,23)\n for i in pri:\n if (x % i == 0) and (y % i == 0):\n return i\n return 0",
"def __init__(self, n, d):\n # Check that n and d are of type int:\n if type(n) != int or type(d) != int:\n raise ValueError('requires type int')\n # Check that denominator is non-zero:\n if d == 0:\n raise ZeroDivisionError('requires non-zero denominator')\n # If we get here, n and d are ok => initialize Fraction:\n self.num = n\n self.denom = d\n self.reduce()",
"def reduce(self):\n import math\n g = math.gcd(self.num, self.den)\n return Fraction(self.num//g, self.den//g)",
"def _reduce(self) -> None:\n divisor = self._gcd(self._numerator, self._denominator)\n self._numerator = self._numerator // divisor\n self._denominator = self._denominator // divisor",
"def __init__(self, numer, denom=1, empty=False):\n self.numer = []\n self.denom = []\n self.neg = False\n\n # Initialize with no contents:\n # For private use only.\n if empty:\n return\n\n # If copy constructing another Fraction:\n if isinstance(numer, RationalFrac):\n self.numer = numer.numer\n self.denom = numer.denom\n self.neg = numer.neg\n return\n\n if numer == 0:\n self.numer = [0, ]\n return\n\n # If initialized with a float:\n elif isinstance(numer, float):\n if numer < 0.0:\n self.neg = True\n numer = abs(numer)\n exp = len(str(numer).split('.')[1])\n # numer = int(str(numer).split('.')[1])\n\n numer = int(round(numer * 10 ** exp))\n self.numer = RationalFrac.factorize(numer)\n self.denom = [2, 5] * exp\n\n # If initialized with a numerator and denominator:\n elif isinstance(numer, int) and isinstance(denom, int):\n self.numer = RationalFrac.factorize(abs(numer))\n self.denom = RationalFrac.factorize(abs(denom))\n self.neg = not ((numer < 0) == (denom < 0))\n if denom == 0:\n raise ZeroDivisionError(\n 'should not initialize with a denominator of zero.')\n\n # If initialized with a string version of a fraction:\n elif isinstance(numer, str):\n split = numer.strip().split('/')\n self.__init__(int(split[0]), int(split[1]))\n\n # Unexpected argument as initialization value:\n else:\n TypeError(\n f'{str(numer)} invalid. '\n f'must initialize with one of:\\n'\n 'int, float, str.')\n\n # If successful, cleanup:\n self.simplify()",
"def __init__(self, fraction: float = 1):\n pass",
"def __truediv__(self, other):\n try:\n assert not other.is_zero(), \"cannot divide by 0.\"\n new_num = (self._num * other._den)\n new_den = (self._den * other._num) \n return Rational(new_num, new_den)\n except AttributeError:\n return (self / Rational.parse_number(other))",
"def reduce(self):\n # assign absolute value of numerator and denominator to a new variable\n abs_num = abs(self.num)\n abs_denom = abs(self.denom)\n\n # get a gcd\n GCD = 1\n i = 1\n while i <= min(abs_num, abs_denom):\n if (abs_num % i == 0) and (abs_denom % i == 0):\n GCD = i\n i = i + 1\n if self.num < 0 and self.denom < 0:\n self.num = abs_num // GCD\n self.denom = abs_denom // GCD\n else:\n self.num = self.num // GCD\n self.denom = self.denom // GCD",
"def __init__( self, *arg ):\r\n\t\tif ( len( arg ) == 2 ):\r\n\t\t\t# if one of the arguments is a fraction:\r\n\t\t\tif ( type( self ) in ( type( arg[ 0 ] ), type( arg[ 1 ] ) ) ):\r\n\t\t\t\t# future division is broken, so simple division doesn't work here.\r\n\t\t\t\tif ( type( self ) == type( arg[ 0 ] ) == type( arg[ 1 ] ) ):\r\n\t\t\t\t\t self.numerator = arg[ 0 ].numerator * arg[ 1 ].denominator\r\n\t\t\t\t\t self.denominator = arg[ 0 ].denominator * arg[ 1 ].numerator\r\n\t\t\t\telif ( type( self ) == type( arg[ 0 ] ) ):\r\n\t\t\t\t\tself.numerator = arg[ 0 ].numerator\r\n\t\t\t\t\tself.denominator = arg[ 0 ].denominator * arg[ 1 ]\r\n\t\t\t\telse: #( type( self ) == type( arg[ 1 ] ) ):\r\n\t\t\t\t\tself.numerator = arg[ 0 ] * arg[ 1 ].denominator\r\n\t\t\t\t\tself.denominator = arg[ 1 ].numerator\r\n\t\t\t\t\t\r\n\t\t\telif ( type( arg[ 0 ] ) in FRACTION_VALID_TYPES ) and ( type( arg[ 1 ] ) in FRACTION_VALID_TYPES ):\r\n\t\t\t\tself.numerator = arg[ 0 ]\r\n\t\t\t\tif ( arg[ 1 ] ):\r\n\t\t\t\t\tself.denominator = arg[ 1 ]\r\n\t\t\t\telse:\r\n\t\t\t\t\traise ZeroDivisionError( \"Denominator of a fraction cannot be 0\" )\r\n\t\t\telse:\r\n\t\t\t\traise TypeError( \"Invalid type for Fraction Constructor\" )\r\n\r\n\t\telif ( len( arg ) == 1 ):\r\n\t\t\tif ( type( arg[ 0 ] ) in FRACTION_VALID_TYPES ):\r\n\t\t\t\t self.numerator = arg[ 0 ]\r\n\t\t\t\t self.denominator = 1\r\n\t\t\telif ( type( arg[ 0 ] ) == type( self ) ): # if the argument is a fraction, copy it.\r\n\t\t\t\tself.numerator = arg[ 0 ].numerator\r\n\t\t\t\tself.denominator = arg[ 0 ].denominator\r\n\t\t\telse:\r\n\t\t\t\ttry: # check to see if the object has a __fraction__ method that returns a fraction. If not, raise an error.\r\n\t\t\t\t\tf = arg[ 0 ].__fraction__( )\r\n\t\t\t\texcept AttributeError:\r\n\t\t\t\t\traise TypeError( \"Invalid type for fraction constructor\" )\r\n\t\t\t\tif ( type( f ) == type( self ) ):\r\n\t\t\t\t\tself = f\r\n\t\t\t\telse:\r\n\t\t\t\t\traise TypeError( \"__fraction__( ) method returns incorrect data type for fraction constructor\" )\r\n\t\telif not len( arg ):\r\n\t\t\tself.numerator = 0\r\n\t\t\tself.denominator = 1\r\n\t\telse:\r\n\t\t\traise TypeError( \"fraction constructor takes at most 2 arguments (%d given)\" % len( arg ) )\r\n\r\n\t\t#eliminate any float values, we don't need floats in a fraction.\r\n\t\tif ( types.FloatType in ( type( self.numerator ), type( self.denominator ) ) ):\r\n\t\t\tself.numerator, self.denominator = self._noFloats( self.numerator, self.denominator )\r\n\t\t\t\r\n\t\tself._reduce( )",
"def numerator(self):\n return +self",
"def __init__(self, time, numerator, denominator):\n self.time = time\n self.numerator = numerator\n self.denominator = denominator",
"def __init__(self, ratio=0.3, p=1.7, reduce=True):\n assert ratio > 0 and ratio <= 1, \"ratio should be in range [0, 1]\"\n assert p > 1, \"p should be >1\"\n self.ratio = ratio\n self.p = p\n self.reduce = reduce",
"def _create_divisor(x):\n return x if x != 0 else 1",
"def __truediv__(self, other):\n # supported type for operand except Rational\n if isinstance(other, int):\n if other == 0:\n raise ZeroDivisionError('division by zero')\n return Rational(self.num, self.den * other)\n if not isinstance(other, Rational):\n return NotImplemented\n if other == 0:\n raise ZeroDivisionError('division by zero')\n return Rational(self.num * other.den, self.den * other.num)",
"def normalize(b, a):\n num, den = b, a\n\n den = np.atleast_1d(den)\n num = np.atleast_2d(_align_nums(num))\n\n if den.ndim != 1:\n raise ValueError(\"Denominator polynomial must be rank-1 array.\")\n if num.ndim > 2:\n raise ValueError(\"Numerator polynomial must be rank-1 or\"\n \" rank-2 array.\")\n if np.all(den == 0):\n raise ValueError(\"Denominator must have at least on nonzero element.\")\n\n # Trim leading zeros in denominator, leave at least one.\n den = np.trim_zeros(den, 'f')\n\n # Normalize transfer function\n num, den = num / den[0], den / den[0]\n\n # Count numerator columns that are all zero\n leading_zeros = 0\n for col in num.T:\n if np.allclose(col, 0, atol=1e-14):\n leading_zeros += 1\n else:\n break\n\n # Trim leading zeros of numerator\n if leading_zeros > 0:\n warnings.warn(\"Badly conditioned filter coefficients (numerator): the \"\n \"results may be meaningless\", BadCoefficients)\n # Make sure at least one column remains\n if leading_zeros == num.shape[1]:\n leading_zeros -= 1\n num = num[:, leading_zeros:]\n\n # Squeeze first dimension if singular\n if num.shape[0] == 1:\n num = num[0, :]\n\n return num, den",
"def calceNumerator ( term , numeratorN1 , numeratorN2 ) :\n if term == limit :\n if term % 3 == 0 :\n return ( 2 * int ( term / 3 ) * numeratorN1 ) + numeratorN2\n return numeratorN1 + numeratorN2\n\n multiplier = 1\n if term % 3 == 0 :\n multiplier = 2 * int ( term / 3 )\n numerator = multiplier * numeratorN1 + numeratorN2\n\n return calceNumerator ( term + 1 , numerator , numeratorN1 )",
"def fromRational(rat):\n def longDiv(c, e, n):\n \"\"\"\n Divide the numerator by the denominator using long division.\n\n :param c: long\n :param e: long\n :param n: long\n :return: Scientific\n \"\"\"\n if n == 0:\n return Scientific(c, e)\n else:\n # TODO: Use a logarithm here!\n # TODO: Can't use tail recursion like this in python!\n if n < d:\n return longDiv(c * 10, e - 1, n * 10)\n else:\n (q, r) = quotRemInteger(n, d)\n return longDiv(c+q, e, r)\n\n d = rat.denominator\n\n if d == 0:\n raise ZeroDivisionError\n else:\n return rational.positivize(longDiv(0, 0), rat.numerator)",
"def divide(numerator, denominator, num_decimal_places):\n \n if denominator == 0 or num_decimal_places < 0:\n raise ValueError\n\n pieces = []\n\n # determine sign of final number, then make both numerator and denominator\n # positive for simplicity\n sign = 1\n if numerator < 0:\n sign = -sign\n numerator = -numerator\n if denominator < 0:\n sign = -sign\n denominator = -denominator\n if sign < 0:\n pieces.append('-')\n\n # determine integral part\n num_units = 0\n while numerator >= denominator:\n numerator -= denominator\n num_units += 1\n pieces.append(str(num_units))\n\n pieces.append('.')\n\n # determine fractional part\n for _ in range(num_decimal_places):\n numerator *= 10\n num_units = 0\n while numerator >= denominator:\n numerator -= denominator\n num_units += 1\n pieces.append(str(num_units))\n\n return float(''.join(pieces))"
] | [
"0.7272838",
"0.71475744",
"0.6812566",
"0.6804737",
"0.6622814",
"0.6595566",
"0.6460698",
"0.6444287",
"0.64000016",
"0.6399492",
"0.63688904",
"0.6362692",
"0.6287831",
"0.62010443",
"0.60404706",
"0.5914123",
"0.57778704",
"0.5751536",
"0.5651751",
"0.560133",
"0.5544102",
"0.54900086",
"0.5460345",
"0.5431319",
"0.5425829",
"0.53691024",
"0.5362038",
"0.53409314",
"0.5338635",
"0.5308921"
] | 0.7570159 | 0 |
Returns the current pin. | def get_pin(self) -> str:
return self._pin | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pin(self) -> int:",
"async def get_pin_thread(self) -> int:\n return await self.AD.threading.get_pin_thread(self.name)",
"def get_current_address(self):\n pass",
"def read(self):\n if self.mode == UNAVAILABLE:\n raise IOError, \"Cannot read pin %s\"% self.__str__()\n return self.value",
"async def get_app_pin(self) -> bool:\n return await self.AD.threading.get_app_pin(self.name)",
"def _getCurrentPoint(self):\n return self.__currentPoint",
"def get_pi(self):\n return self.__pi",
"def get_current(self):\n return self.node.sdo[0x221c].phys # mA",
"def analog_read(self, pin):\n return self.analog_pins_analog_numbering[pin].current_value",
"def get_current(self) -> int:\n return self._current",
"def pin_state(self, pin):\n port_num = self._convert_pin_port(pin)\n if port_num:\n value = gpio.input(port_num)\n return value",
"def getCurrent(self):\n return self.__current",
"def get_auth_pin(self):\n url = url_base + \"authorize\"\n params = {\"response_type\": \"ecobeePin\",\n \"client_id\": app_key,\n \"scope\": \"smartWrite\"}\n resp = requests.get(url, params=params)\n try:\n resp_json = resp.json()\n except:\n raise ValueError(\"Response Could not be translated to json {}\".format(resp))\n return resp_json[\"ecobeePin\"], resp_json[\"code\"]",
"def get_current(self):\n return self.current",
"def get_current(self):\n return self.x",
"def get_pin(self, pin_def):\n if type(pin_def) == list:\n bits = pin_def\n else:\n bits = pin_def.split(':')\n a_d = bits[0] == 'a' and 'analog' or 'digital'\n part = getattr(self, a_d)\n pin_nr = int(bits[1])\n if pin_nr >= len(part):\n raise InvalidPinDefError('Invalid pin definition: %s at position 3 on %s' % (pin_def, self.name))\n if getattr(part[pin_nr], 'mode', None) == UNAVAILABLE:\n raise InvalidPinDefError('Invalid pin definition: UNAVAILABLE pin %s at position on %s' % (pin_def, self.name))\n if self.taken[a_d][pin_nr]:\n raise PinAlreadyTakenError('%s pin %s is already taken on %s' % (a_d, bits[1], self.name))\n # ok, should be available\n pin = part[pin_nr]\n self.taken[a_d][pin_nr] = True\n if pin.type is DIGITAL:\n if bits[2] == 'p':\n pin.mode = PWM\n elif bits[2] == 's':\n pin.mode = SERVO\n elif bits[2] is not 'o':\n pin.mode = INPUT\n else:\n pin.enable_reporting()\n return pin",
"async def encoder_read(self, pin):\n return self.digital_pins[pin].current_value",
"async def encoder_read(self, pin):\n return self.digital_pins[pin].current_value",
"def postal_code_current(self, instance):\r\n return instance.user.profile.postal_code_current",
"async def analog_read(self, pin):\n return self.analog_pins[pin].current_value",
"def current(self):\n with driver.get_active_context() as ac:\n devnum = ac.devnum\n if devnum is not None:\n return self[devnum]",
"def pinnacle(self):\n return self._pinnacle",
"def GetCurrentOffset():\r\n return GetData().offsetCurrent",
"def pinterest(self):\n return self._pinterest",
"def get_current_location(self):\n return self._current_loc",
"def address1_current(self, instance):\r\n return instance.user.profile.address1_current",
"async def digital_read(self, pin):\n return self.digital_pins[pin].current_value",
"def is_on(self):\r\n self._state = self.iobus.read_pin(self.targetpin)\r\n return self._state",
"def _http_get_current_id(self):\n return self._http_request('').json()['currentplid']",
"def read_pin10(self):\n return self.PIN_10_SIGNAL_TEST"
] | [
"0.68164206",
"0.6619968",
"0.64361113",
"0.64119536",
"0.632375",
"0.6322167",
"0.62348837",
"0.6177347",
"0.6095625",
"0.6095085",
"0.6087233",
"0.6051547",
"0.6041345",
"0.6023839",
"0.59682727",
"0.59486204",
"0.59245473",
"0.59245473",
"0.59241426",
"0.59072316",
"0.5895448",
"0.5887845",
"0.58572847",
"0.5834438",
"0.58132374",
"0.5801159",
"0.5793416",
"0.5785716",
"0.5783795",
"0.57732207"
] | 0.80345154 | 0 |
If the amount is valid, subtracts it from the balance and returns None; otherwise, returns an error message. | def withdraw(self, amount):
if amount < 0:
return "Amount must be >= 0"
elif self._balance < amount:
return "Insufficient funds"
else:
self._balance -= amount
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_fail_balance_negative(self):\n self.bundle.transactions[3].value -= 1\n\n validator = BundleValidator(self.bundle)\n\n self.assertFalse(validator.is_valid())\n\n self.assertListEqual(\n validator.errors,\n\n [\n 'Bundle has invalid balance (expected 0, actual -1).',\n ],\n )",
"def withdraw(self, amount):\r\n balance = self['get']('balance')\r\n if amount > balance:\r\n return 'Insufficient funds'\r\n self['set']('balance', balance - amount)\r\n return self['get']('balance')",
"def withdraw(self, amount):\n if amount > self.balance:\n return 'Insufficient funds'\n self.balance = self.balance - amount\n return self.balance",
"def subtract(self, amount: float, reason: str = \"\") -> \"Bank\":\n\n if amount == 0: # Pointless, do nothing.\n return 0\n\n self.__record_ledger__(-amount, reason)\n self.balance -= amount\n return self",
"def check_amount_validity(self, amount):\r\n\r\n alert = \"Not a valid amount. Please try again!\"\r\n\r\n if type(amount) == int or type(amount) == float:\r\n return amount\r\n else:\r\n return alert",
"def clean_amount(self):\n if self.payer_channel == 2: # ignore balance check if not using wallet\n return self.cleaned_data['amount']\n else:\n pay_amount = self.cleaned_data.get('amount')*100\n payer_wallet = Wallet.objects.filter(wallet_id=self.cleaned_data.get('payer_method')).first()\n if payer_wallet is None:\n raise forms.ValidationError(\n self.error_messages['payer wallet unavailable'],\n code='payer wallet unavailable'\n )\n else:\n payer_balance = payer_wallet.balance\n if pay_amount > payer_balance:\n raise forms.ValidationError(\n self.error_messages['no_enough_balance'],\n code='no_enough_balance'\n )\n else:\n return self.cleaned_data['amount']",
"def deposit(self, amount):\n message = self.account.deposit(float(amount))\n if message:\n return message\n else:\n self.myView.displayAccount()\n return \"success\"",
"def test_fail_balance_positive(self):\n self.bundle.transactions[0].value += 1\n\n validator = BundleValidator(self.bundle)\n\n self.assertFalse(validator.is_valid())\n\n self.assertListEqual(\n validator.errors,\n\n [\n 'Bundle has invalid balance (expected 0, actual 1).',\n ],\n )",
"async def test_fail_neagative_amount(self, conn, user_with_wallet):\n amount = Decimal('-3')\n\n with pytest.raises(ValueError) as exc:\n await get_from_wallet(\n conn,\n wallet_id=user_with_wallet[1],\n amount=amount,\n )\n assert str(exc.value) == 'Amount must be positive'",
"def test_missing_balance(self):\n db = MockDatabase()\n\n prev = TestBlock(block_type=BlockTypes.CHECKPOINT, transaction={})\n result, errors = prev.validate_transaction(db)\n self.assertEqual(result, ValidationResult.invalid)\n self.assertIsInstance(errors[0], EuroTokenBlock.MissingBalance)",
"def withdrawMoney(self, withdraw_amount):\r\n if (self.balance_amt - withdraw_amount) > 0:\r\n self.balance_amt = self.balance_amt - withdraw_amount\r\n else:\r\n raise WithdrawError #Exception('Overdraft withdrawal Error. Cannot withdraw more than amount in account balance: {}'.format(self.balance_amt))\r",
"def get_amount(self):\n\t\tif self.amount is not None:\n\t\t\treturn self.amount\n\t\treturn abort(400, {\"message\" : \"please provide the amount to process\"})",
"def validate(self):\n if self.amount > 0:\n return True\n return False",
"def test_withdraw_amount_view_with_negative_amount(self):\n self.account.current_balance = 100000\n self.account.save()\n\n client.force_authenticate(user=self.account.user, token=self.token)\n url = reverse('customer_withdraw')\n request = client.post(url, {'amount': -100}, format='json')\n self.assertEqual(400, request.status_code)",
"def pay(self, amount):\n if amount > self.balance:\n print(f\"Not enough balance! Only ${self.balance} left.\")\n return False\n self.balance -= amount\n return True",
"def withdraw(self, amount):\n if amount > self.balance:\n raise ValueError('insufficient funds to withdraw $%.2f' % amount)\n self.balance -= amount\n return self.balance",
"def mempool_assert_my_amount(condition: ConditionWithArgs, unspent: CoinRecord) -> Optional[Err]:\n if unspent.coin.amount != int_from_bytes(condition.vars[0]):\n return Err.ASSERT_MY_AMOUNT_FAILED\n return None",
"def check_funds(self, amount):\n if abs(amount)>self.get_balance(): return False\n else: return True",
"def delete(self):\n existing_balance = self.account.calculated_balance\n\n if not self.active:\n pass\n elif (existing_balance - self.amount) < 0:\n raise AccountBalanceError(\n 'Balance of account {} would be brought below 0'.format(self.account)\n )\n\n super().delete()",
"def sanitize_balance(balance: str) -> Union[int, float]:\n if balance.lower() in [\"unlimited\", \"n/a\"]:\n return -1\n # Take the string and convert it to a numeric type.\n to_number = float(balance.replace(\",\", \"\"))\n # Only return a float if we need decimal precision.\n return to_number if to_number % 1 else int(to_number)",
"def validate_payment_amount(\n self,\n value: Text,\n dispatcher: CollectingDispatcher,\n tracker: Tracker,\n domain: Dict[Text, Any],\n ) -> Dict[Text, Any]:\n\n credit_card = tracker.get_slot(\"credit_card\")\n cc_balance = tracker.get_slot(\"credit_card_balance\")\n account_balance = float(tracker.get_slot(\"account_balance\"))\n try:\n entity = get_entity_details(\n tracker, \"amount-of-money\"\n ) or get_entity_details(tracker, \"number\")\n amount_currency = parse_duckling_currency(entity)\n if not amount_currency:\n raise (TypeError)\n if account_balance < float(amount_currency.get(\"amount_of_money\")):\n dispatcher.utter_message(template=\"utter_insufficient_funds\")\n return {\"payment_amount\": None}\n return amount_currency\n except (TypeError, AttributeError):\n pass\n if value and value.lower() in cc_balance.get(credit_card.lower()):\n key = value.lower()\n amount = cc_balance.get(credit_card.lower()).get(key)\n amount_type = f\" (your {key})\"\n\n if account_balance < float(amount):\n dispatcher.utter_message(template=\"utter_insufficient_funds\")\n return {\"payment_amount\": None}\n return {\n \"payment_amount\": f\"{amount:.2f}\",\n \"payment_amount_type\": amount_type,\n \"currency\": \"$\",\n }\n\n else:\n dispatcher.utter_message(template=\"utter_no_payment_amount\")\n return {\"payment_amount\": None}",
"def test_invalid_balance_genesis(self):\n db = MockDatabase()\n prev = TestBlock(block_type=BlockTypes.TRANSFER, transaction={'balance': 1, 'amount':5})\n result, errors = prev.validate_transaction(db)\n self.assertEqual(result, ValidationResult.invalid)\n self.assertIsInstance(errors[0], EuroTokenBlock.InvalidBalance)",
"async def test_fail_neagative_amount(self, conn, user_with_wallet):\n amount = Decimal('-3')\n\n with pytest.raises(ValueError) as exc:\n await add_to_wallet(\n conn,\n wallet_id=user_with_wallet[1],\n amount=amount,\n )\n assert str(exc.value) == 'Amount must be positive'",
"def withdraw(self, amount):\n if amount > self.balance:\n raise RuntimeError('Amount greater than available balance.')\n self.balance -= amount\n return self.balance",
"def withdraw(self, amount):\n if amount > self.balance:\n raise RuntimeError('Amount greater than available balance.')\n self.balance -= amount\n return self.balance",
"def withdraw(self, amount):\n self.transactions += [('withdraw', amount)]\n if amount > self.balance:\n return 'Insufficient funds'\n self.balance = self.balance - amount\n return self.balance",
"def balance_money_check():\r\n print(balance_money)",
"def withdrawal(self, amount):\n if self.balance - amount < self.minimum_balance:\n print \"This would take you below your minimum balance.\"\n return\n else:\n self.balance -= amount\n print \"Please take your cash.\"\n print \"Your balance is now $%d.\" % self.balance\n self.transactions.append((\"Withdrawal\", amount))",
"def place_bet(self) -> None:\n amount = self.get_bet()\n while not self.valid_bet(amount):\n print(f\"That is an invalid bet. Please input an amount within ${MINIMUM_BET()} and ${self.balance}\\n\")\n amount = self.get_bet()\n self.balance -= amount\n self.bet = amount\n print(f\"A total of ${self.bet} has been deducted from your balance. Good luck, player!\\n\")\n time.sleep(1)",
"def _is_amount_valid(self):\n\t\tamount = self.entry_amount.get()\n\n\t\ttry:\n\t\t\tfloat(amount)\n\t\texcept ValueError:\n\t\t\tmessagebox.showerror(\"Invalid Amount\", \"Amount must be a positive number\")\n\t\t\treturn False\n\n\t\tif float(amount) < 0:\n\t\t\tmessagebox.showerror(\"Invalid Amount\", \"Amount must be a positive number\")\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True"
] | [
"0.6829546",
"0.665555",
"0.6612244",
"0.65875065",
"0.6495299",
"0.6483181",
"0.62700593",
"0.6266016",
"0.622912",
"0.62264717",
"0.6190278",
"0.6178433",
"0.603522",
"0.6021789",
"0.6011362",
"0.5971348",
"0.5929509",
"0.5925402",
"0.59032834",
"0.5886347",
"0.5868926",
"0.58591205",
"0.5858177",
"0.5853787",
"0.5853787",
"0.58535427",
"0.58454156",
"0.5844892",
"0.58054477",
"0.57596517"
] | 0.7301331 | 0 |
Computes, deposits, and returns the interest. | def compute_interest(self) -> float:
interest = self._balance * SavingsAccount.RATE
self.deposit(interest)
return interest | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def total_interest(self):\n return sum(self.table[\"interest\"])",
"def total_interest(self) -> Decimal:\n return self._quantize(self.schedule(int(self.term / self.term_multiplier * self.n_periods)).total_interest)",
"def investment(principal, interest):\r\n while True:\r\n principal *= (1 + interest)\r\n yield principal",
"def __calculate_monthly_interest(self):\n return self.__percentage_interest / 12",
"def effecticeInterestRate():\n rate = float(input(\"What is your interest rate:\\n\"))\n compound = int(input(\"How many times in a year you give interest:\\n\"))\n\n EIR = (1 + ((rate/100)/compound))**compound - 1\n eir = EIR*100\n return \"Your effective interest rate is: %.3f\" % eir",
"def interest_to_principle(self) -> float:\n return float(round(self.total_interest / self.total_principal * 100, 1))",
"def calculate_profit(self):",
"def get_percent_interest(self):\n return self.__percentage_interest",
"def _ebit(self):\n return self.net_income + self.tax_expense + self.interest_expense",
"def interest(self, from_date, to_date):\n yearfrac = findates.daycount.yearfrac(from_date,\n to_date,\n \"30/360 US\")\n months = yearfrac * 12\n return Decimal((1.0 + \\\n self.annual_interest_rate / 12.0) ** months - 1.0)",
"def calculate_compound_total(principal, interest, n):\n return principal * (1 + interest / 100) ** n",
"def impurity_improvement(self, impurity):\n\n\t\timpurity_left, impurity_right = self.children_impurity()\n\t\t#return (impurity - impurity_right - impurity_left)\n\t\treturn ((self.weighted_n_node_samples / self.weighted_n_samples) *\n\t\t\t\t\t(impurity - self.weighted_n_right / self.weighted_n_node_samples * impurity_right\n\t\t\t\t\t- self.weighted_n_left / self.weighted_n_node_samples * impurity_left))",
"def PV_BenefitSurrender(t):\n if t > last_t:\n return 0\n else:\n return (-prj_bnft_Surrender(t) + PV_BenefitSurrender(t + 1)) / (1 + DiscRate(t))",
"def impurity(eps):\n # TODO your code here\n return cal_gini(eps)",
"def Insurance(Md,X):\n u = X[iu]\n b = Md.b()\n return u/b - u/(1-u+u*b)",
"async def get_coins_of_interest(\n self,\n ) -> Tuple[Dict[bytes32, Coin], Dict[bytes32, Coin]]:\n all_pending = []\n pending_accept = await self.get_offers_with_status(TradeStatus.PENDING_ACCEPT)\n pending_confirm = await self.get_offers_with_status(TradeStatus.PENDING_CONFIRM)\n pending_cancel = await self.get_offers_with_status(TradeStatus.PENDING_CANCEL)\n all_pending.extend(pending_accept)\n all_pending.extend(pending_confirm)\n all_pending.extend(pending_cancel)\n removals = {}\n additions = {}\n\n for trade in all_pending:\n for coin in trade.removals:\n removals[coin.name()] = coin\n for coin in trade.additions:\n additions[coin.name()] = coin\n\n return removals, additions",
"def calculate(self):\n self._emi_months = self._period * 12\n self._total_interest = math.ceil(self._loan_amount * self._period * self._rate / 100)\n self._total_amount_pi = float(self._loan_amount + self._total_interest)\n self._emi_amount = math.ceil(self._total_amount_pi / self._emi_months)\n return self",
"def insurance(self):\n insurance_cost = 0.0056 * self.input_dict['project_value_usd']\n return insurance_cost",
"def prompt_user_account_to_get_interest():\n print('What account do you want 0.5% automatic interest?:')\n return input()",
"def interests(self):\n if \"interests\" in self._prop_dict:\n return self._prop_dict[\"interests\"]\n else:\n return None",
"def _compute_amount(self):\n raise NotImplementedError()",
"def get_contribution(self):\n salary = self._get_salary()\n if not salary:\n return 0\n # Class 1 NIC.\n contribution = 0\n st = 702\n if salary > st:\n contribution = (salary - st) * 0.138\n return contribution",
"def calculator(self, income):\n annuity = float(config.basic(income)) # 社保总额\n out = []\n if float(income) > 3500.00:\n taxable_income = (float(income) - float(annuity) - 3500.00) # 课税对象金额\n taxrate = self.tax_rate(taxable_income) # 税率\n deduction = deductions[taxrate] # 速算扣除数\n tax = taxable_income * taxrate - deduction # 个税金额\n after = float(income) - float(tax) - float(annuity) # 税后工资\n # print(\"社保总额:{}, 个税金额:{}, 税后工资:{}\".format(annuity, tax, after))\n else:\n tax = 0.00 # 个税金额\n after = float(income) - annuity\n for i in [annuity, tax, after]:\n out.append('{:.2f}'.format(i))\n return out",
"def Insurance(Md,X):\n if VERSION == 16:\n utemp = 0.0*X[iu]+1.0*Md.ubar\n elif VERSION == 31:\n utemp = 1.25*(X[iu]-Md.ubar)+1.0*Md.ubar\n else:\n utemp = X[iu]\n\n Mom = Md.IP.get_Moments(utemp,Md.ubar,Md.tau)\n return beta*(-Mom[1]+Mom[3]/Mom[0])",
"def calculate(self) -> float:",
"def cal_scaled_interest(nominal_interest_rate, installment_time_period, interest_time_period, interest_paid_on_deposit_percent):\n periods_per_year = np.array([365, 52, 26, 24, 13, 12, 4, 2, 1])\n installments_period_dict = {'days':0, 'weeks':1, 'two-weeks':2, '15 days':3, '4 weeks':4, 'months':5, 'quarters':6, 'half-years':7, 'years':8}\n interest_period_dict = {'day':0, 'week':1, 'two-weeks':2, '15 days':3, '4 weeks':4, 'month':5, 'quarter':6, 'half-year':7, 'year':8}\n\n installments_arr = 1/ (periods_per_year / 12)\n nominal_arr = 1 / installments_arr\n scaled_interest = nominal_interest_rate*installments_arr[installments_period_dict[installment_time_period]] * nominal_arr[interest_period_dict[interest_time_period]]\n security_deposit_scaled_interest = interest_paid_on_deposit_percent / periods_per_year[installments_period_dict[installment_time_period]]\n return scaled_interest, security_deposit_scaled_interest",
"def estimate_return(self, ob_no, re_n, hidden, masks):\n adv_n, q_n = self.compute_advantage(ob_no, re_n, hidden, masks)\n return q_n, adv_n",
"def dilutionneeded(self) -> float:\n return self.stock*1.0/self.final",
"def PV_IncomePremium(t):\n if t > last_t:\n return 0\n else:\n return prj_incm_Premium(t) + PV_IncomePremium(t + 1) / (1 + DiscRate(t))",
"def life_insurance_to_recive_total(self):\n pass"
] | [
"0.6494362",
"0.6311625",
"0.6234926",
"0.61784625",
"0.61245126",
"0.5884585",
"0.5769434",
"0.57603633",
"0.5738999",
"0.5728413",
"0.57176214",
"0.5580446",
"0.5550851",
"0.5508479",
"0.55062574",
"0.54875445",
"0.54787284",
"0.54710907",
"0.54048574",
"0.5400234",
"0.53486544",
"0.5343253",
"0.5281188",
"0.5269879",
"0.5262919",
"0.5245599",
"0.5239344",
"0.52343607",
"0.5228504",
"0.52278316"
] | 0.74920017 | 0 |
This request returns all the colors in an image as RGB values. | def color(self, image):
response = self._send_request("color", files=dict(image=image))
return response[self._layer]['colors'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_colors(self, url):\n fd = urlopen(url)\n f = io.BytesIO(fd.read())\n im = Image.open(f)\n palette = im.quantize(colors=len(self.lights)).getpalette()\n return self.extract_colors(palette, len(self.lights))",
"def retrieveColor(image):\n w, h, dim = image.shape\n ret = np.zeros((w, h, dim), dtype=np.uint8)\n for i in range(w):\n for j in range(h):\n ret[i][j] = fakingColors(image[i][j])\n return np.clip(ret, 0, 255)",
"def _colored_img_to_arr(image, verbose=False):\n height, width = image.size\n arr = np.array(image.getdata())\n arr = arr.reshape(3, height, width)\n r = arr[0]\n g = arr[1]\n b = arr[2]\n return r, g, b",
"def unique_colors(img):\n colors = {i[1] for i in img.getcolors(maxcolors=img.size[0]*img.size[1])}\n return colors",
"def rgb(self):\n return [self.__r, self.__g, self.__b]",
"def findAllColors(img):\n hist = cv2.calcHist([img], [0, 1, 2], None, [256] * 3, [0, 256] * 3)\n allColors = np.argwhere(hist != 0)\n return allColors",
"def get_all_rgb_values(self):\n\n rgb_values = []\n response = self._table.scan()\n for item in response['Items']:\n rgb_values.append(self._convert_rgb_string_to_tuple(item['rgb_values']))\n\n return rgb_values",
"def get_colors(self):\n x = np.linspace(0, 1, self.length)\n y = x**self.gamma\n\n value = np.linspace(0, 1, len(self.colors))\n r = np.interp(y, value, self.colors[:,0])\n g = np.interp(y, value, self.colors[:,1])\n b = np.interp(y, value, self.colors[:,2])\n\n return np.dstack((r, g, b)).reshape(len(r), 3).astype(np.uint8)",
"def colors(self):\n return self[\"colors\"]",
"def colors(self):\n return self[\"colors\"]",
"def getPaletteInRgb(img):\n assert img.mode == 'P', \"image should be palette mode\"\n pal = img.getpalette()\n colors = chunk(pal, 3, False)\n return colors",
"def get_rgb(self, img, r, g, b):\r\n\r\n # Get specific bands of hyperspectral image\r\n red_channel = img[:, :, r]\r\n green_channel = img[:, :, g]\r\n blue_channel = img[:, :, b]\r\n\r\n img = np.stack((red_channel, green_channel, blue_channel), axis=2)\r\n img = img.astype('float32')\r\n return img",
"def get_color(im_obj):\n #im = Image.open(path, 'r')\n x, y = im_obj.size\n\n r, g, b = 0, 0, 0\n for i in xrange(x):\n for j in xrange(y):\n color_px = im_obj.getpixel((i, j))\n #print color_px\n r += color_px[0]\n g += color_px[1]\n b += color_px[2]\n\n r = r / (x * y)\n g = g / (x * y)\n b = b / (x * y)\n return (r, g, b)",
"def colors(self):\n\t\treturn [(0, 30, 255),(0, 30, 120)]",
"def colors(self):\r\n\t\treturn self._colors",
"def get_color_data(r, g, b, d=3):\n payload = {'r': r, 'g': g, 'b': b, 'd': d}\n\n r = requests.get('http://cs.mwsu.edu/~griffin/color-api/', params=payload)\n print(r)\n return r.json()",
"def GetRGB(self, *args):\n return _XCAFDoc.XCAFDoc_Color_GetRGB(self, *args)",
"def colorPaletteToRGB(image_data,color_table): \n color_table_array = numpy.array([ord(c) for c in color_table])\n n_colors = color_table_array.size / 3\n color_table_array = color_table_array.reshape((n_colors,3))\n channels = [color_table_array[image_data,i] for i in range(3)]\n return channels",
"def rgb(self):\n return (self.red, self.green, self.blue)",
"def rgb(self):\n return (self.r, self.g, self.b)",
"def iter_colors(self):\n return itervalues(self)",
"def colors(self):\n return self._colors",
"def extract_red_values(gaussian_frame, show_processed_image):\n if show_processed_image:\n red_values = []\n for i in range(0, gaussian_frame.shape[0]):\n red_value = gaussian_frame[i, :, 2]\n red_values.append(red_value)\n else:\n red_values = gaussian_frame[:, :, 2]\n return red_values",
"def image_to_data(image):\n pixels = image.convert('RGB').load()\n width, height = image.size\n for y in range(height):\n for x in range(width):\n r,g,b = pixels[(x,y)]\n color = rgb(r, g, b)\n yield (color >> 8) & 0xFF\n yield color & 0xFF",
"def get_rgbColorArray(self, ledIndex, count):\n # buff\n res = []\n # idx\n # r\n # g\n # b\n\n buff = self._download(\"rgb.bin?typ=0&pos=\" + str(int(3*ledIndex)) + \"&len=\" + str(int(3*count)))\n del res[:]\n\n idx = 0\n while idx < count:\n r = YGetByte(buff, 3*idx)\n g = YGetByte(buff, 3*idx+1)\n b = YGetByte(buff, 3*idx+2)\n res.append(r*65536+g*256+b)\n idx = idx + 1\n\n return res",
"def GetColors(self, *args):\n return _XCAFDoc.XCAFDoc_ColorTool_GetColors(self, *args)",
"def colors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetHeatmapColorArgs']]]]:\n return pulumi.get(self, \"colors\")",
"def get_color(self):\n colors = []\n color_specs = [self._red_spec, self._green_spec,\n self._blue_spec, self._white_spec]\n for spec in color_specs:\n driver = DRIVERS[spec.addr]\n colors.append(driver.get_duty_cycle(spec.pin))\n \n return colors",
"def _get_region_color(self, region):\n return [\n x / 255 for x in self._get_from_structure(region, \"rgb_triplet\")\n ]",
"def get_colors(self, maxcolors=None):\n if maxcolors:\n return self.image.getcolors(maxcolors)\n return self.image.getcolors(self.width*self.height)"
] | [
"0.7301836",
"0.7018421",
"0.683226",
"0.6683104",
"0.6672693",
"0.6624762",
"0.6596658",
"0.6569393",
"0.646526",
"0.646526",
"0.63765454",
"0.63485533",
"0.6336055",
"0.63002443",
"0.6296228",
"0.625343",
"0.6228482",
"0.6222722",
"0.62008834",
"0.61840725",
"0.6144083",
"0.6143036",
"0.6118081",
"0.61097234",
"0.6107539",
"0.6102043",
"0.6061389",
"0.6060458",
"0.60545236",
"0.6034048"
] | 0.7611546 | 0 |
Create instances of each available layer. | def __init__(self):
for layer in self._layer_class_map:
setattr(self, layer, self._layer_class_map[layer]()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build(self):\n\n layers = GiraffeLayer.get_all_structural()\n \n for layer in layers:\n\n self.add_objects_from_layer(layer)\n\n return self",
"def init_layers(self):\n\n # get caching layers activated\n caching_layers = G3WCachingLayer.objects.all()\n for caching_layer in caching_layers:\n self.add_layer(str(caching_layer), caching_layer)",
"def create_netlist(self):\n self.add_modules()\n self.add_pins()\n self.create_instances()",
"def create_all(self, registry):\n for cls in registry.values():\n self.create_class(cls)",
"def create(self):\n\n if rs.IsLayer(self.name):\n\n return self\n\n mom = \"\"\n \n for s in self.path:\n \n son = s if (mom == \"\") else (mom + \"::\" + s)\n\n mommy = None if mom == \"\" else mom\n\n if not rs.IsLayer(son):\n\n rs.AddLayer(s, color = None, visible = True, locked = False, parent = mommy)\n\n mom = son\n \n return self",
"def build_layers(self):\n raise NotImplementedError",
"def declare_layers(self, names):\n for name in names:\n self[name]\n return self",
"def declare_layers(self, names):\n for name in names:\n self[name]\n return self",
"def __init__(self, layers):\n\n\t\tself.layers = layers",
"def build(self, *args, **kwargs):\n self._layer_counter = 0\n r = self._build_impl(*args, **kwargs)\n \n # Call the init functions \n if self._build_counter == 0:\n for initlayer in self._layers_to_init:\n if initlayer['initfnkwargs']:\n initlayer['initfn'](initlayer['layer'], **initlayer['initfnkwargs'])\n else:\n initlayer['initfn'](initlayer['layer'])\n \n self._build_counter += 1\n return r",
"def __initAvailableLayerTypes(self):\n from backend.caffe.path_loader import PathLoader\n caffe = PathLoader().importCaffe()\n layerNameMainParts = list(caffe.layer_type_list())\n\n res = {}\n paramsPerLayerType = {}\n\n # calculate common parameters of all layer types\n # by removing all which will be used for one specific layer type only\n # also keep in mind which ones have been removed to readd them to specific layers\n commonParams = self._availableParameterGroupDescriptors[\"LayerParameter\"].parameter() #use .parameter() on purpose\n layerSpecificParameters = set()\n for nameMainPart in layerNameMainParts:\n specificParamsName = [nameMainPart + \"Parameter\"]\n if moreLayerNameParameter.has_key(nameMainPart):\n specificParamsName.append( moreLayerNameParameter[nameMainPart])\n paramsPerLayerType[nameMainPart] = {}\n for key, value in commonParams.items():\n if value.isParameterGroup() and value.parameterName() in specificParamsName:\n paramsPerLayerType[nameMainPart][key] = value\n layerSpecificParameters.add(key)\n\n\n # special case: shared params for loss layers\n key = \"loss_param\"\n value = commonParams[key]\n del commonParams[key]\n for nameMainPart in layerNameMainParts:\n if LayerType.getCategoryByName(nameMainPart) == LayerType.CATEGORY_LOSS:\n paramsPerLayerType[nameMainPart][key] = value\n\n # TODO is there a special case for the TransformationParameter?\n\n # create each layer type after one another\n for nameMainPart in layerNameMainParts:\n\n # add common params to the specific ones\n layerTypeParam = paramsPerLayerType[nameMainPart].keys()\n paramsPerLayerType[nameMainPart].update(commonParams)\n\n irrelevant = layerSpecificParameters.difference(layerTypeParam)\n res[nameMainPart] = LayerType(nameMainPart, paramsPerLayerType[nameMainPart], layerTypeParam, irrelevant)\n\n self._commonParams = commonParams\n self._availableLayerTypes = res",
"def make_instances():\n body = request.json\n return create_instances(\n flavor=body.get(\"flavor\"),\n name=body.get(\"name\"),\n network_name=body.get(\"network_name\"),\n )",
"def initialize_layers(self, layers_config: dict, inputs=None):\n layers_config = layers_config.copy()\n input_lyrs = []\n initiated_layers = OrderedDict()\n wrp_layer = None # indicator for wrapper layers\n first_layer = True\n\n for lyr, lyr_args in layers_config.items():\n\n lyr_config, lyr_inputs, named_outs, call_args = self.deconstruct_lyr_args(lyr, lyr_args)\n\n lyr_name, args, lyr_config, activation = self.check_lyr_config(lyr, lyr_config)\n\n if K.BACKEND == 'pytorch':\n\n if first_layer:\n first_layer = False\n\n if callable(lyr_config):\n lyr_initiated = lyr_config\n else:\n lyr_initiated = TORCH_LAYERS[lyr_name](**lyr_config)\n setattr(self, lyr, lyr_initiated)\n initiated_layers[lyr] = {\"layer\": lyr_initiated, \"named_outs\": named_outs, 'call_args': call_args,\n 'inputs': lyr_inputs}\n\n else:\n # may be user has defined layers without input layer, in this case add Input layer as first layer\n if first_layer:\n if inputs is not None: # This method was called by providing it inputs.\n assert isinstance(inputs, tf.Tensor)\n # since inputs have been defined, all the layers that will be added will be next to first layer\n first_layer = False\n layer_outputs = inputs\n initiated_layers[layer_outputs.name] = {'layer': layer_outputs, 'tf_name': lyr_name}\n\n elif lyr_name != \"Input\":\n if 'input_shape' in lyr_config: # input_shape is given in the first layer so make input layer\n initialized_layer = LAYERS[\"Input\"](shape=lyr_config['input_shape'])\n else:\n # for simple dense layer based models, lookback will not be used\n def_shape = (self.num_ins,) if self.lookback == 1 else (self.lookback, self.num_ins)\n initialized_layer = LAYERS[\"Input\"](shape=def_shape)\n\n # first layer is built so next iterations will not be for first layer\n first_layer = False\n # put the first layer in memory to be used for model compilation\n # add th layer which the user had specified as first layer\n initiated_layers[initialized_layer.name] = {'layer': initialized_layer,\n 'tf_name': lyr_name}\n input_lyrs.append(initialized_layer)\n\n # The inputs to the layer have not been specified, so either it is an Input layer\n if lyr_inputs is None:\n # or it uses the previous outputs as inputs\n if lyr_name == \"Input\":\n # it is an Input layer, hence should not be called\n initialized_layer = LAYERS[lyr_name](*args, **lyr_config)\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'tf_name': lyr_name}\n input_lyrs.append(initialized_layer)\n else:\n # it is executable and uses previous outputs as inputs\n if lyr_name in ACTIVATION_LAYERS:\n layer_outputs = ACTIVATION_LAYERS[lyr_name](name=lyr_config['name'])\n initiated_layers[lyr_config['name']] = {'layer': layer_outputs,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n elif lyr_name in ['TimeDistributed', 'Bidirectional']:\n wrp_layer = LAYERS[lyr_name]\n # because wrapper layer name is property\n initiated_layers[lyr_config['name']] = {'layer': wrp_layer,\n 'tf_name': lyr_name}\n continue\n elif \"LAMBDA\" in lyr_name.upper():\n # lyr_config is serialized lambda layer, which needs to be deserialized\n initialized_layer = tf.keras.layers.deserialize(lyr_config)\n # layers_config['lambda']['config'] still contails lambda, so we need to replace the python\n # object (lambda) with the serialized version (lyr_config) so that it can be saved as json file.\n layers_config[lyr]['config'] = lyr_config\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 
'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n else:\n if wrp_layer is not None:\n initialized_layer = wrp_layer(LAYERS[lyr_name](*args, **lyr_config))\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n wrp_layer = None\n else:\n if lyr_name == \"TemporalFusionTransformer\":\n lyr_config['return_attention_components'] = True\n initialized_layer = LAYERS[lyr_name](*args, **lyr_config)\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n\n else: # The inputs to this layer have been specified so they must exist in lyr_cache.\n # it is an executable\n if lyr_name in ACTIVATION_LAYERS:\n\n layer_outputs = ACTIVATION_LAYERS[lyr_name](name=lyr_config['name'])\n initiated_layers[lyr_config['name']] = {'layer': layer_outputs,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n elif lyr_name in ['TimeDistributed', 'Bidirectional']:\n wrp_layer = LAYERS[lyr_name]\n # because wrapper layer name is property\n initiated_layers[lyr_config['name']] = {'layer': wrp_layer,\n 'tf_name': lyr_name}\n continue\n elif \"LAMBDA\" in lyr_name.upper():\n initialized_layer = tf.keras.layers.deserialize(lyr_config)\n layers_config[lyr]['config'] = lyr_config\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n else:\n if wrp_layer is not None:\n initialized_layer = wrp_layer(LAYERS[lyr_name](*args, **lyr_config))\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n wrp_layer = None\n else:\n layer_initialized = LAYERS[lyr_name](*args, **lyr_config)\n initiated_layers[lyr_config['name']] = {'layer': layer_initialized,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n\n if activation is not None: # put the string back to dictionary to be saved in config file\n lyr_config['activation'] = activation\n\n first_layer = False\n\n self.jsonize_lyr_config(lyr_config)\n\n # inputs = [] todo, indentify input layers\n # for k,v in lyr_cache.items():\n # since the model is not build yet and we have access to only output tensors of each list, this is probably\n # # the only way to know that how many `Input` layers were encountered during the run of this method. 
Each\n # tensor (except TimeDistributed) has .op.inputs attribute, which is empty if a tensor represents output of Input layer.\n # if int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) < 240:\n # if k.upper() != \"TIMEDISTRIBUTED\" and hasattr(v, 'op'):\n # if hasattr(v.op, 'inputs'):\n # _ins = v.op.inputs\n # if len(_ins) == 0:\n # inputs.append(v)\n # else: # not sure if this is the proper way of checking if a layer receives an input or not!\n # if hasattr(v, '_keras_mask'):\n # inputs.append(v)\n\n setattr(self, 'initiated_layers', initiated_layers)\n setattr(self, 'input_lyrs', input_lyrs)\n\n\n # todo,\n # # for case when {Input -> Dense, Input_1}, this method wrongly makes Input_1 as output so in such case use\n # # {Input_1, Input -> Dense }, thus it makes Dense as output and first 2 as inputs, so throwing warning\n # if int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) < 240:\n # if len(layer_outputs.op.inputs) < 1:\n # print(\"Warning: the output is of Input tensor class type\")\n # else:\n # if 'op' not in dir(layer_outputs): # layer_outputs does not have `op`, which means it has no incoming node\n # print(\"Warning: the output is of Input tensor class type\")\n\n # outs = None\n #if BACKEND == 'tensorflow':\n # outs = self.call(input_lyrs)\n # setattr(self, 'output_lyrs', outs)\n # if BACKEND == 'tensorflow':\n # ## Reinitial\n # super(Model, self).__init__(\n # inputs=input_lyrs,\n # outputs=outs)\n #MODEL.__init__(self, inputs=inputs, outputs=outs)\n\n return input_lyrs # , outs",
"def _make_layer(self, X, name, block, num_blocks, out_channels):\n\n for i in range(0, num_blocks):\n X = block(X, name = name + '_block{}'.format(i), out_channels=out_channels)\n return X",
"def _init_layers(self) -> None:\n self.self_attn = MultiheadAttention(**self.self_attn_cfg)\n self.cross_attn = MultiheadAttention(**self.cross_attn_cfg)\n self.embed_dims = self.self_attn.embed_dims\n self.ffn = FFN(**self.ffn_cfg)\n norms_list = [\n build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n for _ in range(3)\n ]\n self.norms = ModuleList(norms_list)",
"def consume_layer(self, reports):\n layer_list = []\n layer_count = 1\n for report in reports:\n layer = create_image_layer(report)\n layer.layer_index = layer_count\n layer_list.append(layer)\n layer_count += 1\n return layer_list",
"def _init_layers(self) -> None:\n self.convs_all_levels = nn.ModuleList()\n for i in range(self.start_level, self.end_level + 1):\n convs_per_level = nn.Sequential()\n convs_per_level.add_module(\n f'conv{i}',\n ConvModule(\n self.in_channels,\n self.feat_channels,\n 3,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n inplace=False,\n bias=False))\n self.convs_all_levels.append(convs_per_level)\n\n conv_branch = []\n for _ in range(self.num_stacked_convs):\n conv_branch.append(\n ConvModule(\n self.feat_channels,\n self.feat_channels,\n 3,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n bias=False))\n self.conv_branch = nn.Sequential(*conv_branch)\n\n self.conv_pred = nn.Conv2d(\n self.feat_channels, self.out_channels, 1, stride=1)",
"def create(self, level, num_levels):\n print(\"Processing level {}\".format(level))\n self.__createCorridors(level)\n if level > 0 and level == num_levels-1:\n self.__createElevator(num_levels)\n self.__createDrillholes()\n\n # Pick the endpoint of some random drillholes as seeds for the\n # starting point of the geological shapes.\n seeds = [d.line.p2 for d in random.sample(self.drills, self.num_shapes)]\n\n # Launch parallel instances of the geological shape creator. Note\n # that because shapes can be very large, it is possible to exceed\n # the amount of space reserved for IPC shared memory. Our workaround\n # is to use a thread pool (at the expense of having to be ruled by\n # Python's Global Interpreter Lock).\n with concurrent.futures.ThreadPoolExecutor() as executor:\n for shape in executor.map(self.__createGeologicalShape, seeds):\n self.shapes.append(shape)",
"def setup_layers(self):\n if self.args.model == \"exact\":\n self.layer = PPNPLayer\n else:\n self.layer = APPNPLayer\n self.setup_layer_structure()",
"def create_network(layers):\r\n return NeuronNetwork(layers)",
"def make_objects(self):\n pass",
"def _init_layers(self):\n self._init_predictor()\n if self.use_edge_fusion:\n self._init_edge_module()",
"def __init__(self, weights=[], alphas=[]):\n self._layers = [Layer(w, a) for w, a in zip(weights, alphas)]",
"def _init_layers(self) -> None:\n self.self_attn = MultiheadAttention(**self.self_attn_cfg)\n self.embed_dims = self.self_attn.embed_dims\n self.ffn = FFN(**self.ffn_cfg)\n norms_list = [\n build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n for _ in range(2)\n ]\n self.norms = ModuleList(norms_list)",
"def add_objects_from_layer(self, layer):\n\n objects = layer.get_allowed_geometry()\n\n typ_plural = layer.path[1]\n typ_sofi = gs.plural_to_sofi[typ_plural]\n\n for obj in objects:\n\n # !! REFACTOR TO CALL PROGRAMATICALLY -> ELIMINATE CONDITIONALS !!\n\n if typ_plural in gs.point_elements:\n\n self.add_node(obj, typ_sofi, layer)\n\n if typ_plural in gs.line_elements:\n\n self.add_line_element(obj, typ_sofi, layer)\n\n if typ_plural in gs.spring_elements:\n\n self.add_spring_sn(obj, typ_sofi, layer) \n\n if typ_plural in gs.area_elements:\n\n self.add_area_element(obj, typ_sofi, layer) \n\n return self",
"def create_network_structures(self, layers, layers_num, input_shape):\n\n start_time = time.time()\n for i in layers_num:\n j = 0\n dups = 0\n while j < self.structure_sample_size:\n net_struct = self.random_product(layers, repeat=i)\n fixed_net_struct = self.model_factory.fix_or_skip(net_struct, input_shape)\n if fixed_net_struct:\n struct_hash = hashlib.md5(\"\".join(fixed_net_struct))\n if struct_hash not in self.seen_structures:\n self.seen_structures.add(struct_hash)\n j += 1\n dups = 0\n yield fixed_net_struct\n else:\n print \"Skipping structure that has already been trained\"\n dups += 1\n if dups > self.STRUCT_DUP_THRESHOLD:\n # We probably seen all structures with this size, so skip to the next size.\n break\n else:\n #print \"skipping invalid structure: %s\" % \"->\".join(net_struct)\n continue\n\n print \"Done in %d minutes!\" % ((time.time() - start_time) / 60)",
"def init_vector_layers(self):\n if self.point_vector_layer:\n QgsMapLayerRegistry.instance().removeMapLayer(self.point_vector_layer.id())\n if self.line_vector_layer:\n QgsMapLayerRegistry.instance().removeMapLayer(self.line_vector_layer.id())\n if self.polygon_vector_layer:\n QgsMapLayerRegistry.instance().removeMapLayer(self.polygon_vector_layer.id())\n\n self.point_vector_layer = QgsVectorLayer(KEY_POINT, \"Vector Items (Points)\", \"memory\")\n self.point_vector_layer.setCrs(QgsCoordinateReferenceSystem(4326), False)\n QgsMapLayerRegistry.instance().addMapLayer(self.point_vector_layer)\n\n self.line_vector_layer = QgsVectorLayer(KEY_LINE, \"Vector Items (Lines)\", \"memory\")\n self.line_vector_layer.setCrs(QgsCoordinateReferenceSystem(4326), False)\n QgsMapLayerRegistry.instance().addMapLayer(self.line_vector_layer)\n\n self.polygon_vector_layer = QgsVectorLayer(KEY_POLYGON, \"Vector Items (Polygons)\", \"memory\")\n self.polygon_vector_layer.setCrs(QgsCoordinateReferenceSystem(4326), False)\n QgsMapLayerRegistry.instance().addMapLayer(self.polygon_vector_layer)\n\n point_data_provider = self.point_vector_layer.dataProvider()\n line_data_provider = self.line_vector_layer.dataProvider()\n polygon_data_provider = self.polygon_vector_layer.dataProvider()\n\n attribute_fields = []\n for attribute in KEY_JSON_PROPERTIES_LIST:\n attribute_fields.append(QgsField(u'vector_' + attribute, QVariant.String))\n\n point_data_provider.addAttributes(attribute_fields)\n line_data_provider.addAttributes(attribute_fields)\n polygon_data_provider.addAttributes(attribute_fields)",
"def make(cls, *args, **kwargs):\n if not args and 'n_repeats' not in kwargs:\n return kwargs.pop('base_block', MultiLayer)(**kwargs)\n return cls(*args, **kwargs)",
"def SetUpLayerManager(self):\n pass",
"def createMachines():\n machines = []\n for i in range(0, num_of_machines):\n cur_machine = Machine(i)\n machines.append(cur_machine)\n return machines"
] | [
"0.676467",
"0.6691837",
"0.66686845",
"0.64733326",
"0.640632",
"0.6375107",
"0.6259991",
"0.6259991",
"0.6215809",
"0.6148475",
"0.6107703",
"0.6078645",
"0.606979",
"0.60238254",
"0.59650904",
"0.59550387",
"0.595401",
"0.59325004",
"0.5912907",
"0.58784574",
"0.5870248",
"0.58582664",
"0.584021",
"0.5818193",
"0.5789758",
"0.57641685",
"0.5758822",
"0.57567406",
"0.5754144",
"0.57522535"
] | 0.6919045 | 0 |
Set value to the tensor. | def set_value(self, indices, val):
assert len(indices) == 3, indices
if self.model_tensor is None:
raise ValueError("Please set the tensor")
self.model_tensor[indices[0], indices[1], indices[2]] = val
return val | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Update(self, value):\n self.SetValue(self.GetValue() + tf.cast(value, self.dtype))",
"def Update(self, value):\n self.SetValue(self.GetValue() + tf.cast(value, self.dtype))",
"def set_node_value(node: Node, value: np.ndarray):\n if node.type != 'Const':\n raise Exception('Can\\'t set value for non-constant node {}'.format(node.name))\n data_type = np.float32\n if node.out_port(0).is_data_type_defined():\n data_type = node.out_port(0).get_data_type()\n node.out_port(0).data.set_value(np.array(value).astype(data_type))",
"def share(self, value):\n self._tensor = value",
"def convert_set_value(g, op, block):\n\n x = g.get_node(op.input(\"Input\")[0])\n if op.input(\"StartsTensorList\"):\n starts = g.get_node(op.input(\"StartsTensorList\")[0])\n else:\n starts = op.attr(\"starts\")[0]\n\n if op.input(\"EndsTensorList\"):\n ends = g.get_node(op.input(\"EndsTensorList\")[0])\n else:\n ends = op.attr(\"ends\")[0]\n\n axes = op.attr(\"axes\")\n assert len(axes) == 1, \"Only support one axes now.\"\n axes = axes[0]\n\n input_shape = infer_shape(x)\n ends = min(ends, input_shape[axes])\n\n if op.input(\"StepsTensorList\"):\n steps = g.get_node(op.input(\"StepsTensorList\")[0])\n else:\n steps = op.attr(\"steps\")[0]\n\n if op.input(\"ValueTensor\"):\n value = g.get_node(op.input(\"ValueTensor\")[0])\n else:\n input_dtype = infer_type(x).checked_type.dtype\n if input_dtype == \"float64\":\n value = _expr.const(op.attr(\"fp64_values\"), dtype=\"float64\")\n elif input_dtype == \"float32\":\n value = _expr.const(op.attr(\"fp32_values\"), dtype=\"float32\")\n elif input_dtype == \"int32\":\n value = _expr.const(op.attr(\"int32_values\"), dtype=\"int32\")\n elif input_dtype == \"int64\":\n value = _expr.const(op.attr(\"int64_values\"), dtype=\"int64\")\n else:\n raise tvm.error.OpNotImplemented(\n \"dtype {} is not supported for set_value\".format(input_dtype)\n )\n\n sliced_data = _op.strided_slice(x, begin=[starts], end=[ends], strides=[steps], axes=[axes])\n sliced_shape = infer_shape(sliced_data)\n\n if infer_shape(value) != sliced_shape:\n expand_value = _op.broadcast_to(value, sliced_shape)\n else:\n expand_value = value\n\n if starts < 0:\n starts = starts + input_shape[axes]\n if ends < 0:\n ends = ends + input_shape[axes]\n\n indices = _op.arange(\n start=_expr.const(starts, dtype=\"int32\"),\n stop=_expr.const(ends, dtype=\"int32\"),\n step=_expr.const(steps, dtype=\"int32\"),\n dtype=\"int32\",\n )\n indices = _op.expand_dims(indices, axis=0)\n out = _op.scatter_nd(x, indices, expand_value, \"update\")\n g.add_node(op.output(\"Out\")[0], out)",
"def set(self, node, value):\n self.val[node] = value",
"def setTensor(self, tensor):\t\t\n\t\tself.cur_tensor = tensor\n\t\tif tensor is not None:\n\t\t\tself.output_shape[self.cur_id] = self.cur_tensor.size()\n\t\telse:\n\t\t\tself.output_shape[self.cur_id] = None",
"def set_value(self, value: float):\n self.points[0, 0] = value\n return self",
"def setValue(self,val):\n self.input.setValues(val)",
"def set_value(self,x):\n self._value = x",
"def set_value(self,x):\n self._value = x",
"def value(self, value):\n self.set_data(value)",
"def setData(self, index, value):\n \n self.state[index.row()][index.column()] = value\n return value",
"def value(self, value: float):\n\n self._value = value",
"def value(self, value: float):\n\n self._value = value",
"def assign(self, value):\n self.value = value",
"def set(self, value):\n if not self.independent:\n raise TypeError(\"Cannot set the value of a cell that is not independent\")\n self._set(value)\n return self",
"def set_tf(self, x):\n x = float(x)\n if self.tf != x:\n self.tf = x",
"def assign(self, value):\n if self._log:\n raise StructureError(\"Trying to assign non-log values to log-weights.\")\n\n value = tf.where(tf.is_nan(value), tf.ones_like(value) * 0.01, value)\n if self._mask and not all(self._mask):\n # Only perform masking if mask is given and mask contains any 'False'\n value *= tf.cast(tf.reshape(self._mask, value.shape), dtype=conf.dtype)\n value = value / tf.reduce_sum(value, axis=-1, keepdims=True)\n return tf.assign(self._variable, value)",
"def set_value(\n self,\n value: float,\n ) -> None:\n self._data_provider.set_value(value)",
"def set_value ( self, object, value ):\n object[ self.index ] = value",
"def set_value (self):\n raise NotImplementedError",
"def set_node(self, n, value):\n node = self.get_node(n)\n if node:\n node.value = value",
"def set_value(self, val):\n self.value = val",
"def setValue(self, value):\n self.setValues((value, value))",
"def set_value(self, m: int, n: int, value: int) -> None:\n\t\tself.matrix[m][n] = value",
"def set_value(self, value):\n self.value = value",
"def set_value(self, value):\n self.value = value",
"def set_value(self, value):\n self.value = value",
"def __setitem__(self, idx, value):\n if not isinstance(value, nodes.Node):\n raise NotImplementedError(\"setitem with non-blaze rhs\")\n result = self.getitem(idx, context='set')\n result = Assign('assign', [result, value])\n result.eval()"
] | [
"0.73305565",
"0.73305565",
"0.7164538",
"0.7106505",
"0.69294816",
"0.6863438",
"0.67309785",
"0.6588569",
"0.65452635",
"0.6523065",
"0.6523065",
"0.6513894",
"0.6425833",
"0.6411298",
"0.6411298",
"0.63991857",
"0.63785946",
"0.6368179",
"0.63664836",
"0.6361774",
"0.6361035",
"0.63446367",
"0.6331422",
"0.6329471",
"0.6329144",
"0.6320682",
"0.6288471",
"0.6288471",
"0.6288471",
"0.6284188"
] | 0.76344925 | 0 |
Given a list of indices (with possible repeats), run optimization and return stats. | def best_value_many_indices(self, indices_list, **kwargs):
indices_list = [tuple(x) for x in indices_list]
stats = {indices: [] for indices in set(indices_list)}
for indices in indices_list:
stats[indices].append(self.best_value_indices(indices=indices, **kwargs))
return stats | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def scipy_optimize_from_indices(\n muygps: MuyGPS,\n batch_indices: np.ndarray,\n batch_nn_indices: np.ndarray,\n test: np.ndarray,\n train: np.ndarray,\n train_targets: np.ndarray,\n loss_method: str = \"mse\",\n verbose: bool = False,\n) -> np.ndarray:\n crosswise_dists = crosswise_distances(\n test,\n train,\n batch_indices,\n batch_nn_indices,\n metric=muygps.kernel.metric,\n )\n pairwise_dists = pairwise_distances(\n train, batch_nn_indices, metric=muygps.kernel.metric\n )\n return scipy_optimize_from_tensors(\n muygps,\n batch_indices,\n batch_nn_indices,\n crosswise_dists,\n pairwise_dists,\n train_targets,\n loss_method=loss_method,\n verbose=verbose,\n )",
"def multi_run(replications: int, iters: List, n: int):\n global call_count\n kwargs = {\n # 'alpha': 0.75,\n # 'rho': 'VaR',\n 'alpha': 0.75,\n 'rho': 'CVaR',\n 'x0': 2,\n 'n0': n,\n 'mu_1': -15,\n 'mu_2': 10,\n 'sigma_1': 4,\n 'sigma_2': 2\n }\n\n out_dict = {\n 'SA': dict(),\n 'SA_SAA': dict(),\n 'NM': dict(),\n 'NM_SAA': dict(),\n 'LBFGS': dict(),\n 'LBFGS_SAA': dict(),\n 'EI': dict(),\n 'EI_SAA': dict()\n }\n total_calls = dict()\n for key in out_dict.keys():\n total_calls[key] = dict()\n for it_count in iters:\n kwargs['iter_count'] = it_count\n for key in out_dict.keys():\n out_dict[key][it_count] = dict()\n total_calls[key][it_count] = 0\n i = 0\n while i < replications:\n try:\n out_dict['SA'][it_count][i] = SA_run(seed=i, **kwargs)\n total_calls['SA'][it_count] += call_count\n call_count = 0\n out_dict['SA_SAA'][it_count][i] = SA_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['SA_SAA'][it_count] += call_count\n call_count = 0\n out_dict['NM'][it_count][i] = NM_run(seed=i, **kwargs)\n total_calls['NM'][it_count] += call_count\n call_count = 0\n out_dict['NM_SAA'][it_count][i] = NM_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['NM_SAA'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS'][it_count][i] = LBFGS_run(seed=i, **kwargs)\n total_calls['LBFGS'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS_SAA'][it_count][i] = LBFGS_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['LBFGS_SAA'][it_count] += call_count\n call_count = 0\n out_dict['EI'][it_count][i] = EI_run(seed=i, **kwargs)\n total_calls['EI'][it_count] += call_count\n call_count = 0\n out_dict['EI_SAA'][it_count][i] = EI_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['EI_SAA'][it_count] += call_count\n call_count = 0\n i += 1\n except:\n continue\n np.save('call_counts_cvar_%d.npy' % n, total_calls)\n evaluate(out_dict, n)",
"def sample(self, indices: List[int]) -> Tuple[torch.Tensor, ...]:\n states, actions, rewards, next_states, dones = [], [], [], [], []\n\n for i in indices:\n s, a, r, n_s, d = self.buffer[i]\n states.append(np.array(s, copy=False))\n actions.append(np.array(a, copy=False))\n rewards.append(np.array(r, copy=False))\n next_states.append(np.array(n_s, copy=False))\n dones.append(np.array(float(d), copy=False))\n\n states_ = torch.FloatTensor(np.array(states)).to(device)\n actions_ = torch.FloatTensor(np.array(actions)).to(device)\n rewards_ = torch.FloatTensor(np.array(rewards).reshape(-1, 1)).to(device)\n next_states_ = torch.FloatTensor(np.array(next_states)).to(device)\n dones_ = torch.FloatTensor(np.array(dones).reshape(-1, 1)).to(device)\n\n if torch.cuda.is_available():\n states_ = states_.cuda(non_blocking=True)\n actions_ = actions_.cuda(non_blocking=True)\n rewards_ = rewards_.cuda(non_blocking=True)\n next_states_ = next_states_.cuda(non_blocking=True)\n dones_ = dones_.cuda(non_blocking=True)\n\n return states_, actions_, rewards_, next_states_, dones_",
"def __call__(self, index, grad, weight):\n allow_np = self.optimizer.allow_np_array if hasattr(self.optimizer, \"allow_np_array\") else is_np_array()\n if not isinstance(index, (list, tuple)):\n indices = [index]\n grads = [_as_classic(grad, allow_np)]\n weights = [_as_classic(weight, allow_np)]\n else:\n indices = index\n grads = _as_classic(grad, allow_np)\n weights = _as_classic(weight, allow_np)\n if weights:\n self.optimizer._set_current_context(weights[0].context.device_id)\n for i, idx in enumerate(indices):\n # convert ctypes.char_p.value back to python str if needed\n if isinstance(idx, bytes):\n indices[i] = py_str(idx)\n idx = indices[i]\n if idx not in self.states:\n with profiler_scope(\"updater:optimizer_state\"):\n self.states[idx] = self.optimizer.create_state_multi_precision(idx, weights[i])\n self.states_synced[idx] = True\n elif not self.states_synced[idx]:\n self.states[idx] = \\\n self.sync_state_context(self.states[idx], weights[i].context)\n self.states_synced[idx] = True\n if self.aggregate_updates:\n # segregate values based on type\n if self.optimizer.aggregate_num is not numpy.inf:\n type_map = {}\n for i, w, g in zip(indices, weights, grads):\n if w.dtype in type_map:\n type_map[w.dtype].append((i, w, g))\n else:\n type_map[w.dtype] = [(i, w, g)]\n for idx in type_map:\n current_index = 0\n indices, weights, grads = zip(*type_map[idx])\n while current_index < len(indices):\n states = []\n step = min(self.optimizer.aggregate_num, len(indices) - current_index)\n for j in range(step):\n states.append(self.states[indices[current_index + j]])\n self.optimizer.update_multi_precision(\n indices[current_index:current_index + self.optimizer.aggregate_num],\n weights[current_index:current_index + self.optimizer.aggregate_num],\n grads[current_index:current_index + self.optimizer.aggregate_num],\n states)\n current_index += self.optimizer.aggregate_num\n else:\n states = [self.states[i] for i in indices]\n self.optimizer.update_multi_precision(indices, weights, grads, states)\n else:\n for i, w, g in zip(indices, weights, grads):\n self.optimizer.update_multi_precision([i], [w], [g], [self.states[i]])",
"def optimize(self, ngen):\n res = 0\n for res in self(ngen):\n pass\n return res",
"def sample_task_ind(self, inds: List[int]):\n for i in range(len(self.data)):\n all_targets = self.data[i].targets\n new_targets = [all_targets[ind] for ind in inds]\n self.data[i].targets = new_targets",
"def getResult(targets, i=None):",
"def experiment_measurements_index(fun, num_measurements, sd, num_trials, seed=21):\n experiments = {}\n solutions = {}\n for ns in num_measurements:\n ratios = []\n mud_solutions = []\n for t in range(num_trials):\n np.random.seed(seed+t)\n _r = fun(sd=sd, num_obs=ns)\n ratios.append(_r)\n mud_solutions.append(np.argmax(_r))\n experiments[ns] = ratios\n solutions[ns] = mud_solutions\n \n return experiments, solutions",
"def test_get_indices_several_existing_items(self):\r\n control_ids = ['PC.354', 'PC.355', 'PC.356', 'PC.481', 'PC.593']\r\n exp_control_indices = [0, 1, 2, 3, 4]\r\n\r\n fast_ids = ['PC.607', 'PC.634', 'PC.635', 'PC.636']\r\n exp_fast_indices = [5, 6, 7, 8]\r\n\r\n obs_control = _get_indices(self.dist_matrix_header, control_ids)\r\n self.assertEqual(obs_control, exp_control_indices)\r\n\r\n obs_fast = _get_indices(self.dist_matrix_header, fast_ids)\r\n self.assertEqual(obs_fast, exp_fast_indices)",
"def main():\n for opt in optimizations:\n compile_command = [\"g++\", \"main.cpp\", f\"-O{opt}\", \"-lpthread\"]\n run(compile_command, check=True)\n for threads in num_threads:\n print(f\"{opt=}, {threads=}\", end=\"\")\n stdout.flush()\n test_command = ['./a.out', str(iterations), str(threads)]\n total = 0\n for samples in range(1, repeats_for_average + 1):\n print(\".\", end=\"\")\n stdout.flush()\n output = run(test_command, check=True, capture_output=True).stdout\n total += int(output.split()[-2]) / 1000\n print(f\"\\t{total / samples:.03f}\")",
"def multiple_eval_for_loops_v1():",
"def evaluate_diversity_single(indices, distances, weight=0.5):\n i, j = [e for e in zip(*itertools.combinations(indices, 2))]\n subset_distances = distances[i, j]\n minimum = np.min(subset_distances)\n mean = np.mean(subset_distances)\n diversity = (1 - weight) * minimum + weight * mean\n\n return [diversity]",
"def process_index(files, idx, idx2=None, cache=None):\n if cache is None:\n cache = {}\n\n if idx in cache:\n return cache[idx]\n\n filename = files[idx]\n if idx2 is None:\n self.log(\n \"Computing Alms for map {}/{}\".format(idx + 1, num_maps), \"all\"\n )\n else:\n self.log(\n \"Computing Alms for sim {} of map {}/{}\".format(\n idx2, idx + 1, num_maps\n ),\n \"all\",\n )\n filename = filename[idx2]\n\n m = self.get_map(filename)\n mask = self.get_mask(mask_files[idx])\n self.apply_mask(m, mask)\n m_alms = self.map2alm(m, self.pol)\n\n cache[idx] = m_alms\n return cache[idx]",
"def run_several_iterations(iterations, means, horizon):\n\n # Initializing the results vector.\n results = [0]*horizon\n\n for iteration in range(iterations):\n\n # The current cumulative regret.\n results = np.add(results, run_sparring_algorithm(means[:, iteration], horizon))\n\n # Returning the average cumulative regret.\n return results/(iterations +.0)",
"def multiple_eval_for_loops_v2():",
"def calculate(self, indices=None, fupdate=0.05):\r\n if not indices:\r\n # Build list of groups of orbitals in each atom for atomresults.\r\n if hasattr(self.data, \"aonames\"):\r\n names = self.data.aonames\r\n elif hasattr(self.data, \"foonames\"):\r\n names = self.data.fonames\r\n\r\n atoms = []\r\n indices = []\r\n\r\n name = names[0].split('_')[0]\r\n atoms.append(name)\r\n indices.append([0])\r\n\r\n for i in range(1, len(names)):\r\n name = names[i].split('_')[0]\r\n try:\r\n index = atoms.index(name)\r\n except ValueError: #not found in atom list\r\n atoms.append(name)\r\n indices.append([i])\r\n else:\r\n indices[index].append(i)\r\n\r\n # Determine number of steps, and whether process involves beta orbitals.\r\n nfrag = len(indices) #nfrag\r\n nstep = func(nfrag - 1)\r\n unrestricted = (len(self.data.mocoeffs) == 2)\r\n alpha = len(self.data.mocoeffs[0])\r\n nbasis = self.data.nbasis\r\n\r\n self.logger.info(\"Creating attribute results: array[4]\")\r\n results= [ numpy.zeros([nfrag, nfrag, alpha], \"d\") ]\r\n if unrestricted:\r\n beta = len(self.data.mocoeffs[1])\r\n results.append(numpy.zeros([nfrag, nfrag, beta], \"d\"))\r\n nstep *= 2\r\n\r\n if hasattr(self.data, \"aooverlaps\"):\r\n overlap = self.data.aooverlaps\r\n elif hasattr(self.data,\"fooverlaps\"):\r\n overlap = self.data.fooverlaps\r\n\r\n #intialize progress if available\r\n if self.progress:\r\n self.progress.initialize(nstep)\r\n\r\n size = len(self.data.mocoeffs[0])\r\n step = 0\r\n\r\n preresults = []\r\n for spin in range(len(self.data.mocoeffs)):\r\n two = numpy.array([2.0]*len(self.data.mocoeffs[spin]),\"d\")\r\n\r\n\r\n # OP_{AB,i} = \\sum_{a in A} \\sum_{b in B} 2 c_{ai} c_{bi} S_{ab}\r\n\r\n for A in range(len(indices)-1):\r\n\r\n for B in range(A+1, len(indices)):\r\n\r\n if self.progress: #usually only a handful of updates, so remove random part\r\n self.progress.update(step, \"Overlap Population Analysis\")\r\n\r\n for a in indices[A]:\r\n\r\n ca = self.data.mocoeffs[spin][:,a]\r\n\r\n for b in indices[B]:\r\n\r\n cb = self.data.mocoeffs[spin][:,b]\r\n temp = ca * cb * two *overlap[a,b]\r\n results[spin][A,B] = numpy.add(results[spin][A,B],temp)\r\n results[spin][B,A] = numpy.add(results[spin][B,A],temp)\r\n\r\n step += 1\r\n\r\n temparray2 = numpy.swapaxes(results[0],1,2)\r\n self.results = [ numpy.swapaxes(temparray2,0,1) ]\r\n if unrestricted:\r\n temparray2 = numpy.swapaxes(results[1],1,2)\r\n self.results.append(numpy.swapaxes(temparray2, 0, 1))\r\n\r\n if self.progress:\r\n self.progress.update(nstep, \"Done\")\r\n\r\n return True",
"def Skopt5DStats(numIters, numRuns):\n\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n\n assert comm.Get_size() == numRuns, \"Please ensure there is one process running per run i.e \" + str(numRuns) + \" processes.\"\n \n # Define the problem bounds.\n skoptBounds = [(10, 1300), (40, 230), (0, 90), (0, 90), (0, 90)]\n\n # Use the seedlist from the other runs.\n seedList = [843484, 61806, 570442, 867402, 192390, 60563, 899483, 732848, 243267, 439621] \n\n if rank == 0:\n timeList = []\n bestFoMList = []\n\n # Define which solver will be used.\n optimiser = skopt.Optimizer(skoptBounds, base_estimator = \"RF\", n_initial_points = int(np.ceil(numIters/10)), random_state = seedList[rank])\n\n # Start timing.\n startTime = time.time()\n bestFoM = 0\n\n # Start optimisation.\n for iteration in range(numIters):\n\n # Find out which point to sample next.\n nextParams = optimiser.ask()\n\n # Evaluate the objective function.\n nextFoM = FitnessSkopt5D(nextParams)\n\n if abs(nextFoM) > bestFoM:\n bestFoM = abs(nextFoM)\n \n # Update the model.\n optimiser.tell(nextParams, nextFoM)\n\n # One run complete.\n timeElapsed = time.time() - startTime\n # Run complete. Send results to main process. Tags are unique identifiers.\n if rank != 0:\n comm.send(timeElapsed, dest = 0, tag = 1)\n comm.send(bestFoM, dest = 0, tag = 2)\n \n # Wait for all the processes to end.\n comm.Barrier()\n \n if rank == 0:\n # Add own data first.\n bestFoMList.append(bestFoM)\n timeList.append(timeElapsed)\n\n for process in range(comm.Get_size() - 1):\n # Get the data.\n individualTime = None\n individualTime = comm.recv(individualTime, source = process + 1, tag = 1)\n\n individualFoM = None\n individualFoM = comm.recv(individualFoM, source = process + 1, tag = 2)\n\n bestFoMList.append(individualFoM)\n timeList.append(individualTime)\n\n avgRuntime = np.average(timeList)\n avgFoM = np.average(bestFoMList)\n avgFoMPerTime = np.average(np.divide(bestFoMList, timeList))\n avgFoMPerIter = np.average(np.divide(bestFoMList, numIters))\n absBestFoM = np.max(bestFoMList)\n\n print(\"Bayesian optimisation 5D testing complete! Here are the stats:\")\n print(\"Average runtime per run (s): \" + str(avgRuntime))\n print(\"Average FoM: \" + str(avgFoM))\n print(\"Average FoM per unit time: \" + str(avgFoMPerTime))\n print(\"Average FoM per unit iteration: \" + str(avgFoMPerIter))\n print(\"Absolute best FoM determined: \" + str(absBestFoM))\n print(\"------------------------------------------------------------------------------------------------------------------\")\n \n return",
"def optimize_weights(self, generations):\n for gen in range(generations):\n print(\" Generation: %s\" % gen)\n self._pop_f1 = 0\n self._queue_search(self.population)\n self._queue.join()\n self._scores = {}\n while not self._results.empty():\n (index, f1) = self._results.get()\n self._scores[index] = f1\n self._pop_f1 += f1\n ranks = sorted(range(self.population_size), key=lambda s: (self._scores.get(s)))\n self._report(ranks)\n self._next_generation(ranks)",
"def _run_permutation(self, params):\n iter_df, iter_xyz = params\n iter_xyz = np.squeeze(iter_xyz)\n iter_df[[\"x\", \"y\", \"z\"]] = iter_xyz\n stat_values = self._compute_summarystat(iter_df)\n return stat_values",
"def indices(\n index_group: Literal[\"all\"] | str | IndexGroup | Sequence[str],\n ignore_error: bool = False,\n **kwargs,\n) -> Dataset:\n indices = _get_indices_of_group(index_group)\n out = None\n if \"out_file\" in kwargs.keys():\n out = kwargs[\"out_file\"]\n del kwargs[\"out_file\"]\n acc = []\n for i in indices:\n log.info(f\"Computing index '{i.short_name}'\")\n kwargs[\"index_name\"] = i.short_name\n if ignore_error:\n try:\n res = index(**kwargs)\n if \"percentiles\" in res.coords:\n res = res.rename({\"percentiles\": i.short_name + \"_percentiles\"})\n if \"thresholds\" in res.coords:\n res = res.rename({\"thresholds\": i.short_name + \"_thresholds\"})\n acc.append(res)\n except Exception:\n warn(f\"Could not compute {i.short_name}.\")\n else:\n res = index(**kwargs)\n if \"percentiles\" in res.coords:\n res = res.rename({\"percentiles\": i.short_name + \"_percentiles\"})\n if \"thresholds\" in res.coords:\n res = res.rename({\"thresholds\": i.short_name + \"_thresholds\"})\n acc.append(res)\n ds: Dataset = xr.merge(acc)\n if out is not None:\n _write_output_file(\n result_ds=ds,\n input_time_encoding=ds.time.encoding,\n netcdf_version=kwargs.get(\"netcdf_version\", NetcdfVersionRegistry.NETCDF4),\n file_path=out,\n )\n return ds",
"def compute_cost(indexes, matrix):\n total = 0\n for row, columns in indexes:\n total += matrix[row][columns]\n return total",
"def compute_residuals_(words, wx2_idxs, idx2_vec, idx2_aid, idx2_fx, aggregate):\n words_values = pdh.ensure_values(words)\n idx2_aid_values = pdh.ensure_values(idx2_aid)\n idx2_vec_values = pdh.ensure_values(idx2_vec)\n idx2_fx_values = pdh.ensure_values(idx2_fx)\n wx_sublist = pdh.ensure_index(wx2_idxs)\n # Build lists w.r.t. words\n idxs_list = [idxs.astype(INDEX_TYPE) for idxs in pdh.ensure_values_subset(wx2_idxs, wx_sublist)]\n aids_list = [idx2_aid_values.take(idxs) for idxs in idxs_list]\n #wx2_idxs_values = pdh.ensure_values_subset(wx2_idxs, wx_sublist)\n #idxs_list = [pdh.ensure_values(idxsdf).astype(INDEX_TYPE) for idxsdf in wx2_idxs_values] # 13 ms\n if utool.DEBUG2:\n #assert np.all(np.diff(wx_sublist) == 1), 'not dense'\n assert all([len(a) == len(b) for a, b in zip(idxs_list, aids_list)]), 'bad alignment'\n assert idx2_vec_values.shape[0] == idx2_fx_values.shape[0]\n assert idx2_vec_values.shape[0] == idx2_aid_values.shape[0]\n # Prealloc output\n if utool.VERBOSE:\n print('[smk_index] Residual Vectors for %d words. aggregate=%r' %\n (len(wx2_idxs), aggregate,))\n # Nonaggregated residuals\n #_args1 = (words_values, wx_sublist, idxs_list, idx2_vec_values)\n #rvecs_list = smk_speed.compute_nonagg_rvec_listcomp(*_args1) # 125 ms 11%\n words_list = [words_values[wx:wx + 1] for wx in wx_sublist] # 1 ms\n vecs_list = [idx2_vec_values.take(idxs, axis=0) for idxs in idxs_list] # 5.3 ms\n rvecs_list = [smk_core.get_norm_rvecs(vecs, word)\n for vecs, word in zip(vecs_list, words_list)] # 103 ms # 90%\n if aggregate:\n # Aggregate over words of the same aid\n tup = smk_speed.compute_agg_rvecs(rvecs_list, idxs_list, aids_list) # 38%\n (aggvecs_list, aggaids_list, aggidxs_list) = tup\n aggfxs_list = [[idx2_fx_values.take(idxs) for idxs in aggidxs]\n for aggidxs in aggidxs_list]\n if WITH_PANDAS:\n _args2 = (wx_sublist, aggvecs_list, aggaids_list, aggfxs_list)\n # Make aggregate dataframes\n wx2_aggvecs, wx2_aggaids, wx2_aggfxs = pdh.pandasify_agg_list(*_args2) # 617 ms 47%\n else:\n wx2_aggvecs = {wx: aggvecs for wx, aggvecs in zip(wx_sublist, aggvecs_list)}\n wx2_aggaids = {wx: aggaids for wx, aggaids in zip(wx_sublist, aggaids_list)}\n wx2_aggfxs = {wx: aggfxs for wx, aggfxs in zip(wx_sublist, aggfxs_list)}\n if utool.DEBUG2:\n from ibeis.model.hots.smk import smk_debug\n smk_debug.check_wx2(words, wx2_aggvecs, wx2_aggaids, wx2_aggfxs)\n\n return wx2_aggvecs, wx2_aggaids, wx2_aggfxs\n else:\n # Make residuals dataframes\n # compatibility hack\n fxs_list = [[idx2_fx_values[idx:idx + 1] for idx in idxs] for idxs in idxs_list]\n if WITH_PANDAS:\n _args3 = (wx_sublist, idxs_list, rvecs_list, aids_list, fxs_list)\n wx2_rvecs, wx2_aids, wx2_fxs = pdh.pandasify_rvecs_list(*_args3) # 405 ms\n else:\n wx2_rvecs = {wx: rvecs for wx, rvecs in zip(wx_sublist, rvecs_list)}\n wx2_aids = {wx: aids for wx, aids in zip(wx_sublist, aids_list)}\n wx2_fxs = {wx: fxs for wx, fxs in zip(wx_sublist, fxs_list)}\n if utool.DEBUG2:\n from ibeis.model.hots.smk import smk_debug\n smk_debug.check_wx2(words, wx2_rvecs, wx2_aids, wx2_fxs)\n return wx2_rvecs, wx2_aids, wx2_fxs",
"def run_qae_optimization(training_states, n_repetitions, exact=no_noise, noisy=gate_error):\n result_list = []\n def proxy(params, training_states, n_repetitions, exact=no_noise, noisy=gate_error):\n \"\"\"Embedded function version\n \"\"\"\n input_list = fix_list(params, all_param_array=all_param, var_param_array=var_param, fixed_vals_array=fixed_vals)\n fidelities = []\n for training_state in training_states:\n fid = cusp_stage2.compute_stage2_cost_function(*input_list, alpha=training_state, n_repetitions=n_repetitions,\n exact=exact, noisy=noisy)\n fidelities.append(fid)\n avg_fid = np.mean(fidelities)\n result_list.append(1-avg_fid)\n print(1-avg_fid)\n return 1. - avg_fid\n\n \n # Initialize parameters\n half_turn_min = 0\n half_turn_max = 2\n init_params = np.random.uniform(low=half_turn_min, high=half_turn_max,\n size=num_param)\n\n # Optimization using Nelder-Mead.\n h2_qae_wrap = lambda params: proxy(params, training_states=training_states,\n n_repetitions=n_repetitions, exact=exact, noisy=noisy)\n \n if noisy:\n maxiter = 60\n else:\n maxiter = None\n \n res = minimize(h2_qae_wrap, init_params, args=(),\n method='Nelder-Mead', tol=None, \n options={'disp': False, 'maxiter': maxiter, 'xatol': 0.001,\n 'return_all': False, 'fatol': 0.001})\n np.savetxt('stage2_data.csv',result_list, delimiter=',')\n return res.x",
"def flann_index_time_experiment():\n import vtool as vt\n import pyflann\n import itertools\n\n class TestDataPool(object):\n \"\"\"\n Perform only a few allocations of test data\n \"\"\"\n def __init__(self):\n self.num = 10000\n self.data_pool = None\n self.alloc_pool(1000000)\n\n def alloc_pool(self, num):\n print('[alloc] num = %r' % (num,))\n self.num = num\n self.data_pool = vt.tests.dummy.testdata_dummy_sift(num)\n print('[alloc] object size ' + ut.get_object_size_str(self.data_pool, 'data_pool'))\n\n def get_testdata(self, num):\n if len(self.data_pool) < num:\n self.alloc_pool(2 * self.num)\n return self.data_pool[0:num]\n\n pool = TestDataPool()\n\n def get_buildtime_data(**kwargs):\n flann_params = vt.get_flann_params(**kwargs)\n print('flann_params = %r' % (ut.dict_str(flann_params),))\n data_list = []\n num = 1000\n print('-----')\n for count in ut.ProgressIter(itertools.count(), nTotal=-1, freq=1, autoadjust=False):\n num = int(num * 1.2)\n print('num = %r' % (num,))\n #if num > 1E6:\n # break\n data = pool.get_testdata(num)\n print('object size ' + ut.get_object_size_str(data, 'data'))\n flann = pyflann.FLANN(**flann_params)\n with ut.Timer(verbose=False) as t:\n flann.build_index(data)\n print('t.ellapsed = %r' % (t.ellapsed,))\n if t.ellapsed > 5 or count > 1000:\n break\n data_list.append((count, num, t.ellapsed))\n print('-----')\n return data_list, flann_params\n\n data_list1, params1 = get_buildtime_data(trees=1)\n\n data_list2, params2 = get_buildtime_data(trees=2)\n\n data_list4, params4 = get_buildtime_data(trees=4)\n\n data_list8, params8 = get_buildtime_data(trees=8)\n\n data_list16, params16 = get_buildtime_data(trees=16)\n\n import plottool as pt\n\n def plotdata(data_list):\n count_arr = ut.get_list_column(data_list, 1)\n time_arr = ut.get_list_column(data_list, 2)\n pt.plot2(count_arr, time_arr, marker='-o', equal_aspect=False,\n x_label='num_vectors', y_label='FLANN build time')\n\n plotdata(data_list1)\n plotdata(data_list2)\n plotdata(data_list4)\n plotdata(data_list8)\n plotdata(data_list16)\n\n pt.iup()",
"def run_snarl_indexing(job, context, inputGraphFileIDs, graph_names, index_name=None, include_trivial=False):\n \n assert(len(inputGraphFileIDs) == len(graph_names))\n \n # Decide on an index output extension.\n extension = '.trivial.snarls' if include_trivial else '.snarls'\n \n if len(inputGraphFileIDs) > 1:\n # We have been given multiple chromosome graphs. Since snarl indexing\n # can take a lot of memory, we are going to process each one separately\n # and then concatenate the results.\n \n RealtimeLogger.info(\"Breaking up snarl computation for {}\".format(str(graph_names)))\n \n snarl_jobs = []\n for file_id, file_name in zip(inputGraphFileIDs, graph_names):\n # For each input graph, make a child job to index it.\n snarl_jobs.append(job.addChildJobFn(run_snarl_indexing, context, [file_id], [file_name],\n include_trivial=include_trivial,\n cores=context.config.snarl_index_cores,\n memory=context.config.snarl_index_mem,\n disk=context.config.snarl_index_disk))\n \n # Make a job to concatenate the indexes all together \n concat_job = snarl_jobs[0].addFollowOnJobFn(run_concat_files, context, [job.rv() for job in snarl_jobs],\n index_name + extension if index_name is not None else None,\n cores=context.config.snarl_index_cores,\n memory=context.config.snarl_index_mem,\n disk=context.config.snarl_index_disk)\n \n for i in range(1, len(snarl_jobs)):\n # And make it wait for all of them\n snarl_jobs[i].addFollowOn(concat_job)\n \n return concat_job.rv()\n \n else:\n # Base case: single graph\n \n RealtimeLogger.info(\"Starting snarl computation {} trivial snarls...\".format('with' if include_trivial else 'without'))\n start_time = timeit.default_timer()\n \n # Define work directory for docker calls\n work_dir = job.fileStore.getLocalTempDir()\n\n # Download the one graph\n graph_id = inputGraphFileIDs[0]\n graph_filename = graph_names[0]\n job.fileStore.readGlobalFile(graph_id, os.path.join(work_dir, graph_filename))\n\n # Where do we put the snarls?\n snarl_filename = os.path.join(work_dir, (index_name if index_name is not None else \"part\") + extension)\n\n # Now run the indexer.\n RealtimeLogger.info(\"Computing snarls for {}\".format(graph_filename))\n\n cmd = ['vg', 'snarls', graph_filename]\n if include_trivial:\n cmd += ['--include-trivial']\n with open(snarl_filename, \"wb\") as snarl_file:\n try:\n # Compute snarls to the correct file\n context.runner.call(job, cmd, work_dir=work_dir, outfile=snarl_file)\n except:\n # Dump everything we need to replicate the indexing\n logging.error(\"Snarl indexing failed. Dumping files.\")\n context.write_output_file(job, os.path.join(work_dir, graph_filename))\n raise\n \n if index_name is not None:\n # Checkpoint index to output store\n snarl_file_id = context.write_output_file(job, snarl_filename)\n else:\n # Just save the index as an intermediate\n snarl_file_id = context.write_intermediate_file(job, snarl_filename)\n \n \n end_time = timeit.default_timer()\n run_time = end_time - start_time\n RealtimeLogger.info(\"Finished computing snarls. Process took {} seconds.\".format(run_time))\n\n return snarl_file_id",
"def update_priorities(self,indexes, priorities):\r\n\r\n assert len(indexes) == len(priorities)\r\n for index, priority in zip(indexes, priorities):\r\n assert priority > 0 and 0 <= index < len(self.buffer)\r\n self._it_sum[index] = priority ** self._alpha\r\n self._it_min[index] = priority ** self._alpha\r\n self._max_priority = max(self._max_priority, priority)",
"def traj_stats(trj_files, get_stats_func, params=None, weights=None):\n\n # Assign unit weights if not specified otherwise\n if weights is None:\n weights = [1.0 for _ in range(len(trj_files))]\n \n # List of statistics and target data to be used in optimization\n stats_data = []\n target_data = []\n\n # cycle over trajectory data and save target and statistics information\n for di, (filin, weight) in enumerate(zip(trj_files, weights)):\n\n ## Target data ##\n\n # Create a target dataset directory with exhaustive target information\n target_dict = {'type':'trajectory', 'weight':weight}\n\n # read pickled trajectory dictionary\n with open(os.path.join(target_proc, filin+'.pickle'), 'rb') as fi:\n traj_dict = pickle.load(fi)\n\n # save trajectory data\n target_dict['box'] = traj_dict['box']\n target_dict['xyz'] = traj_dict['xyz']\n target_dict['energy'] = traj_dict['energy']\n target_dict['temp'] = traj_dict['temp']\n\n # read and transform forces into (6N+1) arrays\n if 'forces' in traj_dict.keys():\n target_dict['forces'] = force_targ(traj_dict['forces'])\n\n # save inverse temperature data (if T=0, set beta=1/300)\n target_dict['beta'] = np.empty_like(target_dict['temp'])\n for i, temp in enumerate(target_dict['temp']):\n if temp == 0.0:\n target_dict['beta'][i] = 1.0/300.0\n else:\n target_dict['beta'][i] = 1.0/temp\n\n\n ## Statistics data ##\n\n # Collect energy and force statistics from reference configurations\n stats_dict = {'energy':[], 'forces':[]}\n for xyz, box in zip(target_dict['xyz'], target_dict['box']):\n\n # check if box size large enough for nearest neighbor periodic boundary conditions\n if 0.5*box < sc[-1]:\n raise ValueError('box size ({box}) is too small for the force field cutoff {sc[-1]}')\n\n # calculate pair distance matrices (absolute values-rr, components-rx)\n rr, rx = pair_dist(xyz, box)\n\n # calculate sufficient statistics for energies and forces from pair distances\n a1, ar, a2, f1, fr, f2 = get_stats_func(rr, rx, sc)\n\n #print('mindist', np.where(rr > 0.0, rr, 10000.0).min())\n #print(xyz.shape, box)\n #print('x', a1.shape, rr.shape, np.sum(np.abs(a1)))\n\n stats_dict['energy'].append(np.array([ar, a2, a1]))\n stats_dict['forces'].append(np.array([fr, f2, f1]))\n\n # add datasets\n stats_data.append(stats_dict)\n target_data.append(target_dict)\n\n return stats_data, target_data",
"def update_priorities(self, idxes, priorities):\n assert len(idxes) == len(priorities)\n for ndx, priority in zip(idxes, priorities):\n assert priority > 0\n assert 0 <= ndx < len(self.memory)\n self.iter_sum[ndx] = priority ** self.alpha\n self.iter_min[ndx] = priority ** self.alpha\n\n self.max_p = max(self.max_p, priority)",
"def compute_variable_indexes(path, overwrite=True, multiproc=False):\n if multiproc is True:\n tf.keras.backend.clear_session()\n set_cpu_option()\n\n gin_bindings = [\n \"evaluation.evaluation_fn = @variables_idx\",\n \"variables_idx.num_train = 10000\", \"evaluation.random_seed = 2051556033\",\n \"dataset.name='auto'\", \"evaluation.name = 'variables index'\"\n ]\n path = pathlib.Path(path)\n result_path = path.parent.parent / \"metrics\" / \"variance\" / \"filtered_variables\"\n logger.info(\"Computing variable indexes of {}\".format(path.parent.parent))\n gin_evaluation(path, result_path, overwrite, gin_bindings)",
"def update_from_indexes(self, data, **kw):\n for i in data:\n self.update_from_index(i, **kw)"
] | [
"0.57875735",
"0.5553495",
"0.54737693",
"0.54532033",
"0.5412092",
"0.5370195",
"0.53218067",
"0.52247834",
"0.5223462",
"0.5169793",
"0.5165328",
"0.5104045",
"0.5101016",
"0.51004094",
"0.5073601",
"0.50521386",
"0.5042904",
"0.50331867",
"0.50312966",
"0.5018096",
"0.49911457",
"0.4975062",
"0.49729252",
"0.49683326",
"0.49595162",
"0.4922888",
"0.49202782",
"0.49147466",
"0.49020782",
"0.48936343"
] | 0.59009445 | 0 |
Compute an online update given a precomputed minibatch of data, and a model tensor. | def compute_online_update(rating_value, mb_np_orig,
model_tensor_orig,
idx_set,
users_get_value=None,
n_repeats=1,
hotfix_update_hypers=None,
plot_charts=False,
pbars=None,
cancel_updates=False,
**kwargs):
# internal object IDs to update
obj1 = mb_np_orig['objects_rating_v1'][idx_set]
obj2 = mb_np_orig['objects_rating_v2'][idx_set]
# copying the minibatch and the model tensor (will be updated)
mb_np_copy = deepcopy(mb_np_orig)
model_tensor_copy = deepcopy(model_tensor_orig)
if not cancel_updates:
# SETTING THE RATING VALUE
mb_np_copy['cmp'][idx_set, 0] = rating_value
# creating the updater
online = FeaturelessOnlineUpdater()
online.hypers['aggregate_index'] = -1
# hotfix parameter updates
if hotfix_update_hypers is not None:
for key, value in hotfix_update_hypers.items():
online.hypers[key] = value
for key, value in kwargs.items():
online.golden_params[key] = value
# setting data
online.set_minibatch(mb_np_copy)
online.set_model_tensor(model_tensor_copy)
online.set_subtract()
online.silent = True
# CONFIGURATION FOR INDICES
indices_lst = []
for i in range(model_tensor_orig.shape[0]):
indices_lst.append((i, obj1, 0))
indices_lst.append((i, obj2, 0))
indices_lst *= n_repeats
# initial value for the loss/index
initial_value = {ind: online.get_closure_loss(ind)(online.get_value(ind)) for ind in
set(indices_lst)}
if not cancel_updates:
# RUNNING OPTIMIZATION with GOLDEN RATIO
result = online.best_value_many_indices(indices_lst, assign_at_end=True)
# plotting
if plot_charts:
visualize_result_loss(result, indices_lst)
visualize_byindex(result, indices_lst, initial_value)
else:
result = None
if pbars is not None:
if 'comparison' in pbars:
assert len(users_get_value) == len(pbars['comparison'])
for user, pbar in zip(users_get_value, pbars['comparison']):
# obtaining model scores
score1 = online.get_value((user, obj1, 0))
score2 = online.get_value((user, obj2, 0))
# computing the comparison
comparison = 1 / (1 + np.exp(score1 - score2)) * MAX_VALUE
pbar.value = comparison
if 'v1' in pbars:
assert len(users_get_value) == len(pbars['v1'])
for user, pbar in zip(users_get_value, pbars['v1']):
# obtaining model scores
score1 = online.get_value((user, obj1, 0))
pbar.value = score1
if 'v2' in pbars:
assert len(users_get_value) == len(pbars['v2'])
for user, pbar in zip(users_get_value, pbars['v2']):
# obtaining model scores
score1 = online.get_value((user, obj2, 0))
pbar.value = score1
return None
else:
return {
'new_model_tensor': model_tensor_copy,
'new_minibatch': mb_np_copy,
'online_learner': online,
'indices_lst': indices_lst,
'result': result,
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def replica_local_fn(*args, **kwargs):\n if any(\n isinstance(arg, keras_tensor.KerasTensor)\n for arg in nest.flatten((args, kwargs))):\n update_op = None\n else:\n update_op = self.update_state(*args, **kwargs) # pylint: disable=not-callable\n update_ops = []\n if update_op is not None:\n update_ops.append(update_op)\n with ops.control_dependencies(update_ops):\n result_t = self.result() # pylint: disable=not-callable\n\n # We are adding the metric object as metadata on the result tensor.\n # This is required when we want to use a metric with `add_metric` API on\n # a Model/Layer in graph mode. This metric instance will later be used\n # to reset variable state after each epoch of training.\n # Example:\n # model = Model()\n # mean = Mean()\n # model.add_metric(mean(values), name='mean')\n result_t._metric_obj = self # pylint: disable=protected-access\n return result_t",
"def inject(self, model):\n if not hasattr(model, 'train_function'):\n raise RuntimeError('You must compile your model before using it.')\n\n model._check_trainable_weights_consistency()\n\n if model.train_function is None:\n inputs = (model._feed_inputs +\n model._feed_targets +\n model._feed_sample_weights)\n if model._uses_dynamic_learning_phase():\n inputs += [K.learning_phase()]\n fast_params = model._collected_trainable_weights\n\n with K.name_scope('training'):\n with K.name_scope(model.optimizer.__class__.__name__):\n training_updates = model.optimizer.get_updates(\n params=fast_params,\n loss=model.total_loss)\n slow_params = [K.variable(p) for p in fast_params]\n fast_updates = (model.updates +\n training_updates +\n model.metrics_updates)\n\n slow_updates, copy_updates = [], []\n for p, q in zip(fast_params, slow_params):\n slow_updates.append(K.update(q, q + self.alpha * (p - q)))\n copy_updates.append(K.update(p, q))\n\n # Gets loss and metrics. Updates weights at each call.\n fast_train_function = K.function(\n inputs,\n [model.total_loss] + model.metrics_tensors,\n updates=fast_updates,\n name='fast_train_function',\n **model._function_kwargs)\n\n def F(inputs):\n self.count += 1\n R = fast_train_function(inputs)\n if self.count % self.k == 0:\n K.batch_get_value(slow_updates)\n K.batch_get_value(copy_updates)\n return R\n\n #### REM : C'est pas super propre ca comme manière de faire\n #### Tu rompts l'encapsulation de la classe \n model.train_function = F",
"def bn_update(loader, model):\n if not check_bn(model):\n return\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n for input, _ in loader:\n input = input.cuda()\n input_var = torch.autograd.Variable(input)\n b = input_var.data.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n model(input_var)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))",
"def run_update(G, test_data, loc_by_img, MAX_ITERATIONS):\n CONVERGENCE_THRESHOLD = 0.00006288 # About the mean sqaured difference of 1km\n mean_squared_change = 100 # Arbitrary number above CONVERGENCE_THRESHOLD\n num_iter = 0\n has_converged = True\n while mean_squared_change > CONVERGENCE_THRESHOLD:\n if num_iter >= MAX_ITERATIONS:\n has_converged = False\n break\n num_iter += 1\n\n global update\n def update(test_img):\n img_id = test_img['watchlink']\n lat, lon, var, delta_squared = calc_update(img_id, G, loc_by_img)\n return Location(lat, lon, var), delta_squared\n\n new_loc_by_img = loc_by_img.copy()\n with mp.Pool(mp.cpu_count()) as p:\n updates = p.map(update, test_data)\n mean_squared_change = 0\n for i, test_img in enumerate(test_data):\n img_id = test_img['watchlink']\n new_loc = updates[i][0]\n delta_squared = updates[i][1]\n new_loc_by_img[img_id] = new_loc\n mean_squared_change = mean_squared_change / (i+1) * i + (delta_squared / (i + 1))\n\n loc_by_img = new_loc_by_img\n return loc_by_img, num_iter, has_converged",
"def bn_update(loader, model, verbose=False, subset=None, **kwargs):\n if not check_bn(model):\n return\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n num_batches = len(loader)\n\n with torch.no_grad():\n if subset is not None:\n num_batches = int(num_batches * subset)\n loader = itertools.islice(loader, num_batches)\n if verbose:\n loader = tqdm.tqdm(loader, total=num_batches)\n\n for input, _ in loader:\n input = input.cuda(non_blocking=True)\n input_var = torch.autograd.Variable(input)\n b = input_var.data.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n model(input_var, **kwargs)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))",
"def bn_update(loader, model, verbose=False, subset=None, **kwargs):\n if not check_bn(model):\n return\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n num_batches = len(loader)\n\n with torch.no_grad():\n if subset is not None:\n num_batches = int(num_batches * subset)\n loader = itertools.islice(loader, num_batches)\n if verbose:\n\n loader = tqdm.tqdm(loader, total=num_batches)\n for input, _ in loader:\n input = input.cuda(non_blocking=True)\n input_var = torch.autograd.Variable(input)\n b = input_var.data.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n model(input_var, **kwargs)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))",
"def run(self):\n\n self.sess.run(self.update_operations)",
"def bn_update(loader, model, device):\n if not check_bn(model):\n print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n print('no bn in model?!')\n print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>!')\n # return model\n\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n\n model = model.to(device)\n pbar = tqdm(loader, unit=\"samples\", unit_scale=loader.batch_size)\n for sample in pbar:\n inputs, targets, target_lengths = sample['input'].to(device), sample['label'].to(device), sample['label_length'].to(device)\n\n inputs = inputs.to(device)\n b = inputs.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n # model(inputs)\n # TODO:\n model(inputs, False, targets, target_lengths, 275, test_dataset.tokenizer)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))\n return model",
"async def update_model(model_updates):\n async for model_update in model_updates:\n model_location = model_update['model_location']\n print(f\"Updating model to: {model_location}\")\n\n # using incrementing version number to keep track of live model\n # but obviously doesnt work for a real distributed system \n model_table['live_version'] += 1\n model_table['model_location'] = model_location",
"def update(self):\n\n self._eps_count += 1\n if self._replay.size >= self._min_replay_size:\n for _ in range(self._learning_updates):\n samples_indices, minibatch = self._replay.sample(self._batch_size)\n tf_minibatch = [tf.constant(mat, dtype=tf_type) for mat, tf_type in zip(minibatch, [tf.float32, tf.int32, tf.float32, tf.float32, tf.float32])]\n self._learn(*tf_minibatch)\n\n self._learn_iter_counter += 1\n if (self._target_update_period > 1) and (self._learn_iter_counter % self._target_update_period == 0):\n self._update_target_nets()",
"def _compute_raw_update(self):\n\n self.print(\"SGD with Momentum: Computing raw update...\", line_above=True)\n # Read task toml\n\n iteration_number = self.task_dict[\"iteration_number\"] + 1\n\n indices = self.get_parameter_indices(self.raw_gradient_path)\n # scale the gradients, because they can be tiny and this leads to issues\n g_t = self.get_h5_data(self.raw_gradient_path) * self.grad_scaling_fac\n\n if np.sum(np.isnan(g_t)) > 1:\n raise Exception(\n \"NaNs were found in the raw gradient.\" \"Something must be wrong.\"\n )\n\n if iteration_number == 1: # Initialize moments if needed\n shutil.copy(self.raw_gradient_path, self.moment_path)\n write_xdmf(self.moment_path)\n\n with h5py.File(self.moment_path, \"r+\") as h5:\n data = h5[\"MODEL/data\"]\n\n # initialize with zeros\n for i in indices:\n data[:, i, :] = np.zeros_like(data[:, i, :])\n\n v_t = self.beta * self.get_h5_data(self.moment_path) + (1 - self.beta) * g_t\n\n # Store first moment\n shutil.copy(\n self.moment_path,\n self._get_path_for_iteration(self.iteration_number + 1, self.moment_path),\n )\n self.set_h5_data(\n self._get_path_for_iteration(self.iteration_number + 1, self.moment_path),\n v_t,\n )\n\n # Correct bias\n v_t = v_t / (1 - self.beta ** (self.iteration_number + 1))\n update = self.alpha * v_t\n\n if np.sum(np.isnan(update)) > 1:\n raise Exception(\n \"NaNs were found in the raw update.\"\n \"Check if the gradient is not excessively small\"\n )\n\n # Write raw update to file for smoothing\n shutil.copy(self.raw_gradient_path, self.raw_update_path)\n self.set_h5_data(self.raw_update_path, update)",
"def train(model, config, logger, record): \n # initialize userIDs\n users_to_sample = config.users\n userIDs = np.arange(config.users) \n\n # initialize the optimizer for the server model\n dataset = assign_user_data(config, logger)\n\n # initialize the delta offset buffers and local residual buffers\n offset_buffers = []\n residual_buffers = []\n for user in range(users_to_sample):\n offset_buffers.append(WeightBuffer(model.state_dict(), mode=\"zeros\"))\n residual_buffers.append(WeightBuffer(model.state_dict(), mode=\"zeros\"))\n\n global_updater = GlobalUpdater(config, model.state_dict()) \n\n # before optimization, report the result first\n validate_and_log(model, dataset, config, record, logger)\n \n for comm_round in range(config.rounds):\n userIDs_candidates = userIDs[:users_to_sample]\n \n # Wait for all users updating locally\n local_packages = []\n for i, user_id in enumerate(userIDs_candidates):\n user_resource = assign_user_resource(config, user_id, \n dataset[\"train_data\"], dataset[\"user_with_data\"])\n updater = LocalUpdater(user_resource, config)\n updater.local_step(model, offset_buffers[user_id])\n local_package = updater.uplink_transmit()\n local_packages.append(local_package)\n\n # Update the global model\n global_updater.global_step(model, local_packages, residual_buffers)\n\n # Update local offsets\n update_offset_buffers(offset_buffers, \n residual_buffers,\n global_updater.accumulated_delta, \n config.tau) \n\n # log and record\n logger.info(\"Round {:d}\".format(comm_round))\n validate_and_log(model, dataset, config, record, logger)\n\n # if comm_round == config.scheduler[0]:\n # config.lr *= config.lr_scaler\n # config.scheduler.pop(0)",
"def __call__(self, *args, **kwargs):\n\n def replica_local_fn(*args, **kwargs):\n \"\"\"Updates the state of the metric in a replica-local context.\"\"\"\n if any(\n isinstance(arg, keras_tensor.KerasTensor)\n for arg in nest.flatten((args, kwargs))):\n update_op = None\n else:\n update_op = self.update_state(*args, **kwargs) # pylint: disable=not-callable\n update_ops = []\n if update_op is not None:\n update_ops.append(update_op)\n with ops.control_dependencies(update_ops):\n result_t = self.result() # pylint: disable=not-callable\n\n # We are adding the metric object as metadata on the result tensor.\n # This is required when we want to use a metric with `add_metric` API on\n # a Model/Layer in graph mode. This metric instance will later be used\n # to reset variable state after each epoch of training.\n # Example:\n # model = Model()\n # mean = Mean()\n # model.add_metric(mean(values), name='mean')\n result_t._metric_obj = self # pylint: disable=protected-access\n return result_t\n\n from tensorflow.python.keras.distribute import distributed_training_utils # pylint:disable=g-import-not-at-top\n return distributed_training_utils.call_replica_local_fn(\n replica_local_fn, *args, **kwargs)",
"def defineUpdateOperations(self):\n self.updated_value = tf.placeholder(shape=[1, self.network.action_size], dtype=tf.float32)\n self.loss = tf.reduce_sum(tf.square(self.updated_value - self.network.policyLayer))\n self.trainer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)\n\n self.updateModel = self.trainer.minimize(self.loss)",
"def fit_transform_online(self, new_ts_minibatch):\r\n S_inv = self.S_inv\r\n next_warm_start_value = self.next_warm_start_value\r\n online_history_length = self.online_history_length\r\n online_update_mode = self.online_update_mode\r\n online_trend_update_freq = self.online_trend_update_freq\r\n\r\n ts_data = self.ts_data\r\n adjusted_trend = self.adjusted_trend\r\n adjusted_season = self.adjusted_season\r\n irregular_data = self.irregular_data\r\n decomposed_data = self.decomposed_data\r\n\r\n mini_batch_len = len(new_ts_minibatch)\r\n if self.ts_data is None:\r\n raise ValueError(\"fit_transform_online can not be called in the\\\r\n very first time, should followed fit_transform.\")\r\n if mini_batch_len > online_history_length:\r\n raise ValueError(\"for online detrend, the length of mini_batch\\\r\n should not larger than the online_history_length\")\r\n if len(ts_data) < online_history_length:\r\n raise ValueError(\"for online detrend, the online_history_length\\\r\n should not larger than the initial len(ts_data)\")\r\n\r\n # check new_ts_minibatch and change format to 1d np.array\r\n data_validity_check(new_ts_minibatch)\r\n new_ts_minibatch = np.array(new_ts_minibatch).flatten()\r\n new_ts_minibatch = new_ts_minibatch.reshape(-1, 1)\r\n\r\n # update ts_data\r\n ts_data = np.concatenate([ts_data[mini_batch_len:], new_ts_minibatch])\r\n # detrend for ts_data_online_part\r\n ts_data_online = ts_data[-online_history_length:]\r\n\r\n next_warm_start_value = \\\r\n next_warm_start_value[-(online_history_length-1):]\r\n # warm_start_ini = np.hstack(\r\n # (next_warm_start_value[mini_batch_len:],\r\n # np.tile(next_warm_start_value[-1], mini_batch_len)))\r\n warm_start_ini = np.hstack((next_warm_start_value[mini_batch_len:],\r\n np.tile(0, mini_batch_len)))\r\n\r\n # count the No. 
of online ops to decide if do online_trend_update\r\n if self.online_count == 0:\r\n online_trend_update_toggle = True\r\n else:\r\n online_trend_update_toggle = False\r\n # update self.online_count for next use\r\n self.online_count = (self.online_count + 1) % online_trend_update_freq\r\n\r\n # do fit_transform\r\n online_mode = True\r\n self.fit_transform(ts_data_online, online_mode, S_inv, warm_start_ini,\r\n online_trend_update_toggle, mini_batch_len)\r\n adjusted_trend_online = self.adjusted_trend\r\n adjusted_season_online = self.adjusted_season\r\n irregular_data_online = self.irregular_data\r\n # decomposed_data_online = self.decomposed_data\r\n\r\n # # update trend_signal residual_signal\r\n if online_update_mode == 'only_mini_batch':\r\n adjusted_trend = np.vstack(\r\n (adjusted_trend[mini_batch_len:],\r\n adjusted_trend_online[-mini_batch_len:]))\r\n adjusted_season = np.vstack(\r\n (adjusted_season[mini_batch_len:],\r\n adjusted_season_online[-mini_batch_len:]))\r\n irregular_data = np.vstack(\r\n (irregular_data[mini_batch_len:],\r\n irregular_data_online[-mini_batch_len:]))\r\n elif online_update_mode == 'entire_online_history':\r\n update_data_len = online_history_length\r\n adjusted_trend = np.vstack(\r\n (adjusted_trend[mini_batch_len:(-update_data_len +\r\n mini_batch_len)],\r\n adjusted_trend_online))\r\n adjusted_season = np.vstack(\r\n (adjusted_season[mini_batch_len:(-update_data_len +\r\n mini_batch_len)],\r\n adjusted_season_online))\r\n irregular_data = np.vstack(\r\n (irregular_data[mini_batch_len:(-update_data_len +\r\n mini_batch_len)],\r\n irregular_data_online))\r\n else:\r\n raise ValueError(\"input para of online_update_mode is not right\")\r\n decomposed_data = np.hstack(\r\n (adjusted_trend, adjusted_season, irregular_data))\r\n\r\n self.ts_data = ts_data\r\n self.adjusted_trend = adjusted_trend\r\n self.adjusted_season = adjusted_season\r\n self.irregular_data = irregular_data\r\n self.decomposed_data = decomposed_data\r\n return decomposed_data",
"def process_minibatch(minibatch, model):\n X_train = []\n y_train = []\n # Loop through our batch and create arrays for X and y\n # so that we can fit our model at every step.\n for memory in minibatch:\n # Get stored values.\n old_state_m, action_m, reward_m, new_state_m = memory\n # Get prediction on old state.\n old_qval = model.predict(old_state_m, batch_size=1)\n # Get prediction on new state.\n newQ = model.predict(new_state_m, batch_size=1)\n # Get our best move. I think?\n maxQ = np.max(newQ)\n y = np.zeros((1, 3))\n y[:] = old_qval[:]\n # Check for terminal state.\n if reward_m != -50: # non-terminal state\n update = (reward_m + (GAMMA * maxQ))\n else: # terminal state\n update = reward_m\n #message_display('GAME OVER!!')\n # Update the value for the action we took.\n y[0][action_m] = update\n X_train.append(old_state_m.reshape(NUM_INPUT,))\n y_train.append(y.reshape(NUM_OUTPUT,))\n\n X_train = np.array(X_train)\n y_train = np.array(y_train)\n\n return X_train, y_train",
"def update(\n self,\n batch: ModelInput,\n optimizer: torch.optim.Optimizer,\n target: Optional[torch.Tensor] = None,\n idx=None,\n next_obs=None,\n ) -> float:\n optimizer = cast(torch.optim.Optimizer, optimizer)\n self.train()\n optimizer.zero_grad()\n loss = self.loss(batch, target=target, idx=idx, next_obs=next_obs)\n loss.backward()\n optimizer.step()\n return loss.detach().item()",
"def process(self, sess):\n sess.run(self.p_sync) # copy weights from shared to local\n\n rollout = self.pull_batch_from_queue()\n\n batch = process_rollout(rollout, gamma=0.99, lambda_=1.0)\n\n should_compute_summary = self.task == 0 and self.local_steps % 11 == 0\n\n if should_compute_summary:\n fetches = [self.summary_op, self.p_train_op, self.global_step]\n else:\n fetches = [self.p_train_op, self.global_step]\n\n feed_dict = {\n self.local_network.x: batch.si,\n self.ac: batch.a,\n self.adv: batch.adv,\n self.r: batch.r,\n self.local_network.p_state_in[0]: batch.features[0],\n self.local_network.p_state_in[1]: batch.features[1],\n }\n fetched = sess.run(fetches, feed_dict=feed_dict)\n\n if should_compute_summary:\n self.summary_writer.add_summary(tf.Summary.FromString(fetched[0]), fetched[-1])\n self.summary_writer.flush()\n\n # Update the distctiminator\n sess = tf.get_default_session()\n self.local_steps += 1",
"def process(self, sess):\n\n sess.run(self.sync) # copy weights from shared to local\n rollout = self.pull_batch_from_queue()\n batch = process_rollout(rollout, gamma=0.99, lambda_=1.0)\n\n should_compute_summary = self.task == 0 and self.local_steps % 11 == 0\n\n if should_compute_summary:\n fetches = [self.summary_op, self.train_op, self.global_step]\n else:\n fetches = [self.train_op, self.global_step]\n\n feed_dict = {\n self.local_network.x: batch.si,\n self.ac: batch.a,\n self.adv: batch.adv,\n self.r: batch.r,\n self.local_network.state_in[0]: batch.features[0],\n self.local_network.state_in[1]: batch.features[1],\n }\n\n fetched = sess.run(fetches, feed_dict=feed_dict)\n\n if should_compute_summary:\n self.summary_writer.add_summary(tf.Summary.FromString(fetched[0]), fetched[-1])\n self.summary_writer.flush()\n self.local_steps += 1",
"def update_model(engine, batch):\n\t\tengine.model.train()\n\t\tengine.model.rpn.nms_thresh = 0.7\n\t\timg, target = prepare_batch(batch, device=get_device(engine.model))\n\t\tengine.optimizer.zero_grad()\n\t\tloss = engine.model(img, target)\n\t\tlosses = sum(l for l in loss.values())\n\t\tlosses.backward()\n\t\tengine.optimizer.step()\n\t\treturn loss",
"def _update_model(self, verbose: bool, raw=True, smooth=False):\n if (raw and smooth) or (not raw and not smooth):\n raise InversionsonError(\"SGDM updates can be raw or smooth, not both\")\n if raw:\n gradient = (\n self.comm.lasif.lasif_comm.project.paths[\"gradients\"]\n / f\"ITERATION_{self.iteration_name}\"\n / \"summed_gradient.h5\"\n )\n if not os.path.exists(self.raw_gradient_path):\n shutil.copy(gradient, self.raw_gradient_path)\n if not os.path.exists(self.raw_update_path):\n self._compute_raw_update()\n if smooth:\n self._apply_smooth_update()",
"def update_model(self, verbose):\n if self.comm.project.meshes == \"multi-mesh\":\n self.comm.lasif.move_gradient_to_cluster()\n\n if not self.task_dict[\"summing_completed\"]:\n grad_summer = GradientSummer(comm=self.comm)\n grad_summer.sum_gradients(\n events=self.comm.project.non_val_events_in_iteration,\n output_location=self.raw_gradient_path,\n batch_average=True,\n sum_vpv_vph=True,\n store_norms=True,\n )\n write_xdmf(self.raw_gradient_path)\n self.task_dict[\"summing_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Summing already done\")\n\n if not self.task_dict[\"raw_update_completed\"]:\n self._update_model(raw=True, smooth=False, verbose=verbose)\n self.task_dict[\"raw_update_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Raw updating already completed\")\n\n if not self.task_dict[\"smoothing_completed\"]:\n self.perform_smoothing()\n self.task_dict[\"smoothing_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Smoothing already done\")\n\n if not self.task_dict[\"smooth_update_completed\"]:\n self._update_model(raw=False, smooth=True, verbose=verbose)\n self.task_dict[\"smooth_update_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Smooth updating already completed\")\n\n if not self.task_dict[\"iteration_finalized\"]:\n self._finalize_iteration(verbose=verbose)\n self.task_dict[\"iteration_finalized\"] = True\n self._update_task_file()\n else:\n self.print(\"Iteration already finalized\")\n\n self.finish_task()",
"def update(self, x_train_single, updated_h):\n # x_row = cp.array(x_train_single.toarray())\n # cp.cuda.Stream.null.synchronize()\n updater(x_train_single,updated_h,self.weights,self.num_features,self.num_models,self.learning_rate)\n # self.biases += updated_h * self.learning_rate",
"def momentum_update(self, online_net, target_net, momentum):\n for param_ol, param_tgt in zip(online_net.parameters(), target_net.parameters()):\n param_tgt.data = param_tgt.data * momentum + param_ol.data * (1. - momentum)",
"def calibrate_model(model, criterion, data_loader, neval_batches):\n model.eval()\n cpu = torch.device(\"cpu\")\n \n cnt = 0\n\n with torch.no_grad():\n for image, target in data_loader:\n image = image.to(cpu)\n target = target.to(cpu)\n output = model(image)\n loss = criterion(output, target)\n cnt += 1\n if cnt >= neval_batches:\n return",
"def process(self, sess):\n global send_counter\n \n #sess.run(self.sync) # copy weights from shared to local\n rollout = self.pull_batch_from_queue()\n batch = process_rollout(rollout, gamma=0.99, lambda_=1.0)\n\n should_compute_summary = self.task == 0 and self.local_steps % 11 == 0\n\n if should_compute_summary:\n fetches = [self.summary_op, self.train_op]\n else:\n fetches = [self.train_op]\n\n\n feed_dict = {\n self.local_network.x: batch.si,\n self.ac: batch.a,\n self.adv: batch.adv,\n self.r: batch.r,\n self.local_network.state_in[0]: batch.features[0],\n self.local_network.state_in[1]: batch.features[1],\n }\n\n # Get current trainable variables\n # This is trainable variables\n fetched = sess.run(fetches, feed_dict=feed_dict)\n\n\n if self.num_workers > 1:\n sys.stdout.write('\\r' + str(self.local_steps))\n if self.local_steps % 100 == 0:\n global var0\n global var1\n var1 = sess.run(self.local_network.var_list) # After training\n if var0 != None:\n var_diff = [a - b for (a,b) in zip(var1, var0)]\n var_diff_data = pickle.dumps(var_diff, -1)\n print('Sync weights')\n self.msg_sent = socket_util.socket_send_data_chucks(self.sock, var_diff_data, self.mcast_destination, self.msg_sent)\n var0 = sess.run(self.local_network.var_list) # A list of numpy array\n\n # Handle each message in the socket queue\n while not self.inc_msg_q.empty():\n print('Apply remote gradients')\n # Process received grads_and_vars from other peers\n remote_var_diff_data = self.inc_msg_q.get(False)\n remote_var_diff = pickle.loads(remote_var_diff_data)\n\n add_op = [a+b for (a,b) in zip(self.local_network.var_list, remote_var_diff)]\n sess.run(add_op)\n\n if should_compute_summary:\n self.summary_writer.add_summary(tf.Summary.FromString(fetched[0]))\n self.summary_writer.flush()\n self.local_steps += 1",
"def soft_update(self, local_model, target_model, tau):\n for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):\n target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)",
"def minibatch_fn(net, loss_fn, optimizer, batch):\n\n output_train = net(batch)\n loss = loss_fn(output_train, batch)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n return(loss)",
"def _update(self, update_fn, value, **kwargs):\n input_tensor = ops.convert_to_tensor(\n value, name='value_in_tensor', dtype=self.dtype)\n\n return control_flow_ops.group(\n *tuple(\n _on_device_update(update_fn, v, input_tensor, **kwargs)\n for v in self.variables))",
"def E_step_precompute(self, model_params, my_suff_stat, my_data):"
] | [
"0.6139598",
"0.60502064",
"0.6037282",
"0.59968215",
"0.5993548",
"0.59875697",
"0.59847033",
"0.5928292",
"0.58975184",
"0.5886014",
"0.5876047",
"0.58682144",
"0.58607525",
"0.579297",
"0.5697605",
"0.56715125",
"0.56684655",
"0.56642056",
"0.5641743",
"0.563186",
"0.5615868",
"0.5614599",
"0.56067055",
"0.56023407",
"0.5591051",
"0.5545443",
"0.554425",
"0.55441535",
"0.55441105",
"0.5541167"
] | 0.6717393 | 0 |
Seeds the given output Image with random pixels from the source Image. | def __seed_output_image(self, src_image: Image, out_image: Image) -> None:
src_pixel_array = src_image[:, :].reshape((src_image.area, 3))
src_index_array = np.random.choice(np.arange(src_image.area), out_image.area)
out_image[:, :] = np.take(src_pixel_array, src_index_array, axis=0).reshape(out_image.shape) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def randomize_pixels(image):\n shape_ = image.size()\n image_flat = image.view(-1, image.size(-1))\n shuffled_image = shuffle(image_flat)\n return shuffled_image.view(shape_)",
"def setUp(self):\n self.image = np.random.randint(\n 0, 256, size=(10, 10, 3)).astype('uint8')",
"def place_random(params: Parameters, sample_img: ndarray) -> ndarray:\n\n def start(i: int, j: int):\n return start_with_params(params, i, j)\n\n blocks = generate_blocks(params.texture_block_count, params.block_height, params.block_width,\n sample_img)\n\n output = ndarray(\n shape=(params.output_height, params.output_width, params.nchannels), dtype=np.uint8)\n\n for i in tqdm(range(params.blocks_per_height)):\n for j in range(params.blocks_per_width):\n (start_width, start_height) = start(i, j)\n (end_width, end_height) = (start_width + params.block_width,\n start_height + params.block_height)\n\n piece = random.randint(0, len(blocks) - 1)\n\n output[start_height:end_height, start_width:end_width] = blocks[piece]\n\n return output",
"def __call__(self, image):\n if random.random() < 0.5:\n image = np.flip(image, 1).copy()\n return image",
"def genrandimg(args) -> None:\n\n size = (int(args.x), int(args.y))\n fp = Image.new(\"RGB\", size)\n data = []\n\n if not args.c: # If color\n for i in range(size[0]*size[1]):\n r = random.choice([0x00, 0xff])\n data.append((r, r, r)) # Each RGB value is the same random value\n else: # Else black-and-white\n for i in range(size[0]*size[1]):\n r = [random.choice(range(0, 256)) for _ in range(0, 3)]\n r = (r[0], r[1], r[2]) # Choose 3 random numbers for different RGB values\n data.append(r)\n\n fp.putdata(data)\n print(\"Saving to %s...\" % args.o)\n fp.save(args.o)\n fp.close()",
"def image(n, low=1, high=9):\n output = numpy.random.randint(low, high=high, size=n)\n index = numpy.random.randint(0, len(output))\n output[index] = 0\n return output",
"def sample(self, random_seed=None):\r\n if random_seed:\r\n seed(random_seed)\r\n return self._generate_mask()",
"def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap(i, 0, x_size, -1, 1)\n y = remap(j, 0, y_size, -1, 1)\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n\n im.save(filename)",
"def addMaskImage(img):\r\n [h, w, c] = img.shape\r\n h_start = np.random.randint(h/2,h-1)\r\n w_start = np.random.randint(w/2, w-1)\r\n img[h_start:h-1, :,0]= np.random.randint(0,120)\r\n img[h_start:h-1, :,1]= np.random.randint(0,120) \r\n img[h_start:h-1, :,2]= np.random.randint(0,120) \r\n img[:,w_start:w-1,0]= np.random.randint(0,120)\r\n img[:,w_start:w-1,1]= np.random.randint(0,120) \r\n img[:,w_start:w-1,2]= np.random.randint(0,120) \r\n img = np.uint8(img)\r\n return img, h_start, w_start",
"def generate_rand_img(c, w, h):\n if K.image_dim_ordering() == 'th':\n x = np.random.random((c, w, h))\n else:\n x = np.random.random((w, h, c))\n x = (x - 0.5) * 20 + 128\n return x",
"def random(self):\n self.img[:, :] = np.random.random(\n (self.l_i, self.l_i)).astype('float32')\n self.img_name = 'white_noise'",
"def random_shadow(input_image):\n height, width = input_image.shape[0], input_image.shape[1]\n [x1, x2] = np.random.choice(width, size=2, replace=False)\n k = height / float(x2 - x1)\n b = - k * x1\n im_array = input_image.copy()\n for i in range(height):\n c = int((i - b) / k)\n im_array[i, :c, :] = (im_array[i, :c, :] * .5).astype(np.uint8)\n return im_array",
"def clone_rand(self):",
"def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n im.save(filename)\n return 'saved'",
"def generate_mask(\n self,\n noise_background,\n noise_value,\n generated_points_x,\n generated_points_y,\n xsize,\n ysize,\n ):\n\n # background of noise mask\n img_mask = np.random.randint(\n noise_background[0],\n noise_background[1] + 1,\n (ysize, xsize),\n )\n\n # mask of random value\n img_mask_random = np.random.randint(\n low=noise_value[0],\n high=noise_value[1] + 1,\n size=(ysize, xsize),\n )\n\n # insert random value into background\n img_mask[generated_points_y, generated_points_x] = img_mask_random[generated_points_y, generated_points_x]\n\n return img_mask.astype(\"uint8\")",
"def random_transform(self, x, seed=None):\n # x is a single audio, so it doesn't have image number at index 0\n img_row_axis = self.row_axis - 1\n img_channel_axis = self.channel_axis - 1\n\n if seed is not None:\n np.random.seed(seed)\n\n if not (self.zoom_range[0] == 1 and self.zoom_range[1] == 1):\n zx = np.random.uniform(self.zoom_range[0], self.zoom_range[1])\n input_length = x.shape[img_row_axis]\n x = resample(x, num=int(zx * x.shape[img_row_axis]), axis=img_row_axis)\n if x.shape[img_row_axis] >= input_length:\n x = x[:input_length]\n else:\n x = np.pad(x, ((0, input_length - x.shape[img_row_axis]), (0, 0)),\n 'constant', constant_values=(0, np.mean(x)))\n\n if shift:\n hx = np.random.uniform(-self.shift, self.shift)\n x = shift(x, (int(hx * x.shape[img_row_axis]), 0), mode=self.fill_mode, cval=self.cval)\n\n if self.roll_range:\n tx = np.random.uniform(-self.roll_range, self.roll_range)\n if self.roll_range < 1:\n tx *= x.shape[img_row_axis]\n x = np.roll(x, int(tx), axis=(img_row_axis))\n\n if self.horizontal_flip:\n if np.random.random() < 0.5:\n x = np.flip(x, axis=img_row_axis)\n\n if (self.noise):\n if np.random.random() < 0.5:\n if self.noise[-1] == 'Uniform':\n x = x + np.random.uniform(self.noise[0], self.noise[1], size=x.shape)\n elif self.noise[-1] == 'Normal':\n x = x + np.random.normal(self.noise[0], self.noise[1], size=x.shape)\n\n if self.brightness_range is not None:\n x = random_brightness(x, self.brightness_range)\n\n return x",
"def generate_rand_image(image, groundtruth, noise=True, flip=True):\n # Get the size of the image\n x_size, y_size = image.size\n\n def rotate_augmentation():\n \"\"\"Generate a function to perform a random rotation of an image\n using mirroring for padding\"\"\"\n rand_rotate = np.random.randint(180)\n return lambda image: rotate_with_extension(image, rand_rotate)\n\n def shift_augmentation():\n \"\"\"Generates a function to perform a random shift of the image using mirroring\n for padding\"\"\"\n shift = np.random.randint(-200, 201, size=2)\n return lambda image: shift_with_extension(image, shift)\n\n def zoom_augmentation():\n \"\"\"Generates a function that performs a random zoom on the image\"\"\"\n # Get the width and the height of the zoomed version\n x_len, y_len = np.random.randint(250, 350, size=2)\n # Get left upper ,right and lower bound of the pixels in the original image\n left = np.random.randint(x_size-x_len)\n upper = np.random.randint(y_size-y_len)\n right, lower = left + x_len, upper+y_len\n # Crops the box and resizes it to the original image size\n box = (left, upper, right, lower)\n return lambda image: image.transform(image.size, Image.EXTENT, box)\n\n def flip_augmentation():\n \"\"\"Generates a function to flip the image\"\"\"\n return lambda image: ImageOps.flip(image)\n\n def mirror_augmentation():\n \"\"\"Generates a function to mirror an image\"\"\"\n return lambda image: ImageOps.mirror(image)\n\n # All possible augmentations\n augmentations = [rotate_augmentation(), shift_augmentation(), zoom_augmentation(),\n flip_augmentation(), mirror_augmentation()]\n\n # Loop through all augmentations and apply each one with a probability of 0.5\n for augmentation in augmentations:\n if np.random.randint(2) == 1:\n image = augmentation(image)\n groundtruth = augmentation(groundtruth)\n\n # Add salt or pepper noise each one with a probability of 0.33\n if noise:\n noises = [\"s&p\", \"gauss\"]\n num_noises = len(noises)\n # Choose noise to apply\n noise_rand = np.random.randint(num_noises + 1)\n # apply the noise only to the image and not the groundtruth\n if noise_rand < num_noises:\n image = add_noise(image, type=noises[noise_rand])\n\n return (image, groundtruth)",
"def generate_noise(\n self,\n noise_iteration=(1, 1),\n noise_size=(1, 1),\n noise_value=(0, 128),\n noise_background=(255, 255),\n noise_sparsity=(0.4, 0.6),\n noise_concentration=(0.4, 0.6),\n xsize=1500,\n ysize=1500,\n ):\n\n # generate random iterations\n iterations = random.randint(noise_iteration[0], noise_iteration[1])\n\n # generate background value\n background_value = random.randint(noise_background[0], noise_background[1])\n\n # initialize blank noise mask\n img_mask = np.full((ysize, xsize), fill_value=background_value, dtype=\"int\")\n\n # any invalid noise type will reset noise type to 0\n if self.noise_type not in [1, 2, 3, 4, 5]:\n noise_type = random.randint(1, 5)\n else:\n noise_type = self.noise_type\n\n # random location with no sides if no side is chosen\n if self.noise_side not in self.sides:\n noise_side = random.choice(self.sides)\n else:\n noise_side = self.noise_side\n\n # loop each iterations\n for _ in range(iterations):\n\n # divider to rescale noise mask to larger size\n y_divider = random.randint(noise_size[0], noise_size[1])\n x_divider = random.randint(noise_size[0], noise_size[1])\n\n # generate noise mask for current iteration\n img_mask_temporary = self.generate_mask_main(\n noise_type,\n noise_side,\n noise_value,\n noise_background,\n noise_sparsity,\n noise_concentration,\n int(xsize / x_divider),\n int(ysize / y_divider),\n )\n img_mask_temporary = cv2.resize(\n img_mask_temporary.astype(\"uint8\"),\n (xsize, ysize),\n interpolation=cv2.INTER_CUBIC,\n )\n\n # merge noise mask in each iteration by getting their min value\n img_mask = np.minimum(img_mask_temporary, img_mask)\n\n # output needs uint8 type\n img_mask = img_mask.astype(\"uint8\")\n\n return img_mask",
"def sample_image(generator, n_row, batches_done):\r\n # Sample noise\r\n z = Variable(float_tensor(np.random.normal(0, 1, (n_row ** 2, args.latent_dim))))\r\n labels = np.array([num for _ in range(n_row) for num in range(n_row)])\r\n labels = Variable(long_tensor(labels))\r\n gen_imgs = generator(z, labels)\r\n save_image(gen_imgs.data, \"images/%d.png\" % batches_done, nrow=n_row, normalize=True)",
"def random_crop(self, img, output_img_h = 0.5, output_img_w = 0.5, p = 0.5):\n if self.decision(p):\n height, width, channels = img.shape\n new_height = random.randint(int(height * output_img_h), height)\n new_width = random.randint(int(width * output_img_w), width)\n y = random.randint(0, height - new_height)\n x = random.randint(0, width - new_width)\n roi = img[y:y + new_height, x:x + new_width]\n # check if cut is ahve to much dark pixels, more then 20 %\n non_zeros = np.count_nonzero(roi)\n non_zeros_procent = non_zeros / roi.size\n if non_zeros_procent < 0.8:\n pass\n else:\n img = roi\n return img",
"def generateRandomMask(size, p=0.5):\n mask_array = (np.random.random(size) > p).astype(int)\n mask = sitk.GetImageFromArray(mask_array) \n return mask",
"def sample_image(n_row, batches_done):\n # Sample noise\n z = Variable(Tensor(np.random.normal(0, 1, (n_row ** 2, opt.latent_dim))))\n gen_imgs = decoder(z)\n save_image(\n gen_imgs.data, \"images/%d.png\" % batches_done, nrow=n_row, normalize=True\n )",
"def __call__(self, img, target):\n if random.random() < 0.5:\n img = ImageEnhance.Brightness(img).enhance(0.5 + random.random())\n if random.random() < 0.5:\n img = ImageEnhance.Color(img).enhance(0.5 + random.random())\n if random.random() < 0.5:\n img = ImageEnhance.Contrast(img).enhance(0.5 + random.random())\n return img, target",
"def sample_damaging(image):\r\n return crease_image(blotch_image(image, 100, True), 10, False)",
"def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n\n im.save(filename)",
"def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n\n im.save(filename)",
"def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n\n im.save(filename)",
"def rand(self):\n raise NotImplementedError",
"def test_image(filename, x_size=def_x_size, y_size=def_y_size):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n im.save(filename)",
"def random_padding(self, image, output_size, override_random=None):\n h_img, w_img, ch_img = image.shape\n h_output, w_output = output_size\n\n asser_msg = (\"For Random padding input image Hight must be less or equal to \"\n \"output_size hight\")\n assert h_img <= h_output, asser_msg\n assert_msg = (\"For Random padding input image Width must be less or equal to \"\n \"output_size width\")\n assert w_img <= w_output, assert_msg\n\n output_image = np.zeros((h_output, w_output, ch_img), dtype=np.float32)\n\n if override_random is None:\n pad_h_up = randint(0, h_output - h_img)\n pad_w_left = randint(0, w_output - w_img)\n pad_h_down = h_output - h_img - pad_h_up\n pad_w_right = w_output - w_img - pad_w_left\n else:\n pad_h_up = override_random[0]\n pad_w_left = override_random[1]\n pad_h_down = h_output - h_img - pad_h_up\n pad_w_right = w_output - w_img - pad_w_left\n\n output_image = np.pad(image, ((pad_h_up, pad_h_down), (pad_w_left, pad_w_right), (0, 0)),\n 'constant', constant_values=0)\n\n return output_image, (pad_h_up, pad_w_left)"
] | [
"0.68415916",
"0.65006113",
"0.6458804",
"0.63344526",
"0.6230885",
"0.62036294",
"0.6179382",
"0.5990032",
"0.5980545",
"0.5975108",
"0.59486",
"0.59019417",
"0.5884772",
"0.58742434",
"0.58631706",
"0.58573",
"0.58512574",
"0.5831452",
"0.5810674",
"0.57992274",
"0.5780533",
"0.57777715",
"0.57703",
"0.5754931",
"0.5746372",
"0.5746372",
"0.5746372",
"0.5742517",
"0.5726827",
"0.5711834"
] | 0.81843376 | 0 |
Renders the given pixel in the specified layer of the output Pyramid using the colour of a pixel from the source Pyramid with the closest neighbourhood to the output pixel. | def __render_output_pixel(self, src_pyramid: Pyramid, out_pyramid: Pyramid, level: int, out_point: Point) -> None:
if level == self.__levels - 1:
distances = self.__make_distance_matrix(src_pyramid[level], out_pyramid[level], self.__neighbourhood_padding[level], out_point, True)
else:
prev_distances = self.__make_distance_matrix(src_pyramid[level + 1], out_pyramid[level + 1], self.__neighbourhood_padding[level + 1], out_point // 2, False)
next_distances = self.__make_distance_matrix(src_pyramid[level], out_pyramid[level], self.__neighbourhood_padding[level], out_point, True)
distances = next_distances + np.kron(prev_distances, np.ones((2, 2)))
candidate = np.unravel_index(np.argmin(distances), distances.shape)
out_pyramid[level][out_point] = src_pyramid[level][candidate] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def render_pixel(game_map: PreGameMap, coord: Coordinate):\n if not((0 <= coord[0] < game_map.size[0]) and (0 <= coord[1] < game_map.size[1])):\n return\n terrain = TERRAIN.get(coord, None)\n if terrain == 'sea':\n game_map.display_coord(coord, 'blue')\n return\n if terrain is None:\n game_map.display_coord(coord, 'black')\n return\n value = FOREST.get(coord, 0)\n game_map.display_coord(coord, '#' + str(min(value, 9)) + '00000')",
"def pixel(self, x: int, y: int, colour: int, /) -> None:",
"def draw_pixel(x, y, col):\n unicornhathd.set_pixel(x, 12 - y, col[0], col[1], col[2])",
"def pixel( self, x, y, c = '#ffffff' ):\n self.raster.put( c, ( x, y ) )",
"def intermediate_pixel(alpha, source_RGB, target_RGB):\n return int((1-alpha)*source_RGB+alpha*target_RGB)",
"def draw_pixel_to_display(self):\n register = self.return_middle_registers(self.opcode)\n x = self.registers[register[0]]\n y = self.registers[register[1]]\n height = self.opcode & 0xF\n\n self.registers[0xF] = 0\n\n x = bit_utils.wrap_around(x, self.display.width)\n y = bit_utils.wrap_around(y, self.display.height)\n\n for yline in range(0, height):\n pixels = self.memory[self.I + yline]\n y1 = bit_utils.wrap_around(y + yline, self.display.height)\n for xline in range(0, 8):\n x1 = bit_utils.wrap_around(x + xline, self.display.width)\n if pixels & (0x80 >> xline) != 0:\n if self.display.set_pixel(x1, y1):\n self.registers[0xF] = 1\n\n self.display.draw_flag = True\n logger.info(\"Drawing sprite from {} to {} at {}, {}\".format(\n hex(self.I),\n hex(self.I + height),\n x, y))",
"def draw(self, layer: Layer) -> None:\r\n if layer and layer.layer_index >= self.num_layers:\r\n return\r\n\r\n pyxel.bltm(layer.offset.x, layer.offset.y, self.tilemap_id + layer.layer_index,\r\n self.rect_uv.x, self.rect_uv.y, self.rect_uv.w, self.rect_uv.h,\r\n colkey=layer.transparency_color)",
"def write_pixel(color, img_size, location, image, scale_factor):\n x_location = scale(location.item(0), scale_factor)\n y_location = scale(location.item(1), scale_factor)\n\n img_cont = int(img_size/100)\n if img_cont == 0:\n image.putpixel((x_location, y_location), color)\n else:\n write_to_range(x_location-img_cont, x_location+img_cont, y_location-img_cont, y_location+img_cont, color, image, img_size)",
"def _color(self, x, factor):\r\n factor = (factor/MAX_LEVEL) * 1.8 + .1\r\n degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(x))\r\n x = tfa.image.blend(degenerate, tf.cast(x, tf.float32), factor)\r\n return tf.saturate_cast(x, tf.uint8)",
"def draw(self, color = Color.GREEN):\n self.image[self.x, self.y] = color",
"def tiled_writing(red, nir, output):\n \n #open datasets\n src_red = rio.open(red)\n src_nir = rio.open(nir)\n \n #define raster properies and update datatype\n meta = src_red.meta.copy()\n meta.update({'dtype':'float32'}) # meta is a dictionary\n outfile = output\n #open outfile in writing mode with the properties of defined raster band\n with rio.open(outfile, 'w', **meta) as dst:\n #iterate over blocks of the bands, calculate ndvi for each block \n # and put the blocks back together\n for window in calc_tiles(src_red, tile_size_x, tile_size_y):\n red_block = src_red.read(window=window, masked=True)\n nir_block = src_nir.read(window=window, masked=True)\n #cast ndarrays to Float32 type\n red = red_block.astype('f4')\n nir = nir_block.astype('f4')\n #allow division by zero\n np.seterr(divide='ignore', invalid='ignore')\n #calculate ndvi and write raster\n ndvi = (nir - red) / (nir + red)\n dst.write(ndvi, window=window)\n\n #close dataset\n src_red.close()\n src_nir.close()\n return outfile",
"def pixel(self, x, y, color):\n self._set_window(x, y, x, y)\n self._write(None, _encode_pixel(color))",
"def display_layers(layers, wide, tall):\n\n colours = {\n \"0\": \" \",\n \"1\": \" # \",\n }\n\n for row in range(tall):\n for col in range(wide):\n pixels = [layer[row][col] for layer in layers]\n line = next(colours[p] for p in pixels if p in colours)\n print(line, end=\"\")\n print()",
"def IntermediateColor(startcol, targetcol, frac):\n if frac < 0:\n frac = 0\n if frac >= 1.0:\n frac = 1.0\n sc = MakeColorTuple(startcol)\n tc = MakeColorTuple(targetcol)\n dR = tc[0] - sc[0]\n dG = tc[1] - sc[1]\n dB = tc[2] - sc[2]\n R = sc[0] + dR * frac\n G = sc[1] + dG * frac\n B = sc[2] + dB * frac\n return \"#%02x%02x%02x\" % (R, G, B)",
"def draw_overlay(radius, color, sub_result):\n\tblurred_circle = draw_gauss(radius, color).reshape((-1, 4))\n\tflat_sub_result = sub_result.reshape((-1, 4))\n\n\tresult_alpha = flat_sub_result[:,3].astype('float')\n\tcircle_alpha = blurred_circle[:,3].astype('float')\n\talpha_sum = result_alpha + circle_alpha\n\talpha = circle_alpha / alpha_sum\n\talpha_inv = 1 - alpha\n\n\tnew_alpha = np.minimum(alpha_sum, 255)\n\n\thue_channel = blurred_circle[:,0] * alpha + flat_sub_result[:,0] * alpha_inv\n\tsat_channel = blurred_circle[:,1] * alpha + flat_sub_result[:,1] * alpha_inv\n\tval_channel = blurred_circle[:,2] * alpha + flat_sub_result[:,2] * alpha_inv\n\n\tchans = np.dstack((hue_channel.astype('uint8'), sat_channel.astype('uint8'), val_channel.astype('uint8'), new_alpha.astype('uint8')))\n\treturn chans.reshape(sub_result.shape)",
"def channel_blend(pixSrc, pixPng, srcH, srcW, x, y, mode='weight', color_match=False):\n modes = [item for i, item in blend_mode.items()]\n # 1.find all indices satisfying conditions, and replace the value of indices in source image with logo image.\n # note: from pillow to numpy, (w,h) has converted to (h,w).\n index = np.where(pixPng[:, :, 3] > 15)\n y_id = index[0] + y - 1\n x_id = index[1] + x - 1\n\n # ensure the exceeding part remained in boundary.\n y_id = np.where(y_id >= srcH, srcH - 1, y_id)\n x_id = np.where(x_id >= srcW, srcW - 1, x_id)\n id = (y_id, x_id)\n\n # matching logo color with source image.\n if color_match:\n pixSrc_ = pixSrc.copy()[..., :3]\n pixPng_ = pixPng.copy()[..., :3]\n mean_source, stddev_source = cv2.meanStdDev(pixSrc_)\n mean_png, stddev_png = cv2.meanStdDev(pixPng_)\n mdiff = mean_png - mean_source\n mdiff = np.array(mdiff).reshape((1, 1, 3))\n pixPng_ = pixPng_.astype(np.float64)\n pixPng_ -= mdiff\n pixPng_ = np.clip(pixPng_, 0, 255)\n pixPng_ = pixPng_.astype(np.uint8)\n pixPng[..., :3] = pixPng_\n\n if mode not in modes: raise NotImplementedError(\n \"only {0:'naive',1:'weight',2:'poisson',3:'multiply'} are supported.\")\n if mode == 'weight':\n pixSrc = weight_paste(pixSrc, pixPng, id, index)\n elif mode == 'naive':\n pixSrc = naive_paste(pixSrc, pixPng, id, index)\n elif mode == 'poisson':\n pixSrc = poisson_blend(pixSrc, pixPng, id, index, x, y)\n elif mode == 'multiply':\n pixSrc = multiply(pixSrc, pixPng, id, index)\n\n return cv2.cvtColor(pixSrc, cv2.COLOR_RGBA2RGB)",
"def process_image(overviews, db_graph, input_filename, color, out_raster_srs):\n if verbose > 0:\n print(\"~~~process_image\")\n input_image = gdal.Open(input_filename)\n stem = Path(input_filename).stem\n if not(\"dataSet\" in overviews):\n overviews['dataSet'] = {}\n overviews['dataSet']['boundingBox'] = {}\n overviews['dataSet']['limits'] = {}\n\n tile_limits = get_tile_limits(input_filename)\n\n if not(\"LowerCorner\" in overviews['dataSet']['boundingBox']):\n overviews['dataSet']['boundingBox'] = tile_limits\n else:\n if tile_limits['LowerCorner'][0] < overviews['dataSet']['boundingBox']['LowerCorner'][0]:\n overviews['dataSet']['boundingBox']['LowerCorner'][0] = tile_limits['LowerCorner'][0]\n if tile_limits['LowerCorner'][1] < overviews['dataSet']['boundingBox']['LowerCorner'][1]:\n overviews['dataSet']['boundingBox']['LowerCorner'][1] = tile_limits['LowerCorner'][1]\n if tile_limits['UpperCorner'][0] > overviews['dataSet']['boundingBox']['UpperCorner'][0]:\n overviews['dataSet']['boundingBox']['UpperCorner'][0] = tile_limits['UpperCorner'][0]\n if tile_limits['UpperCorner'][1] > overviews['dataSet']['boundingBox']['UpperCorner'][1]:\n overviews['dataSet']['boundingBox']['UpperCorner'][1] = tile_limits['UpperCorner'][1]\n\n # for z in tiles:\n for tile_z in range(overviews['level']['min'], overviews['level']['max'] + 1):\n print('Niveau de zoom : ', tile_z)\n\n resolution = overviews['resolution'] * 2 ** (overviews['level']['max'] - tile_z)\n\n MinTileCol = \\\n math.floor(round((tile_limits['LowerCorner'][0] - overviews['crs']['boundingBox']['xmin'])/(resolution*overviews['tileSize']['width']),8))\n MinTileRow = \\\n math.floor(round((overviews['crs']['boundingBox']['ymax']-tile_limits['UpperCorner'][1])/(resolution*overviews['tileSize']['height']),8))\n MaxTileCol = \\\n math.ceil(round((tile_limits['UpperCorner'][0] - overviews['crs']['boundingBox']['xmin'])/(resolution*overviews['tileSize']['width']),8)) - 1\n MaxTileRow = \\\n math.ceil(round((overviews['crs']['boundingBox']['ymax']-tile_limits['LowerCorner'][1])/(resolution*overviews['tileSize']['height']),8)) - 1\n\n if not( str(tile_z) in overviews['dataSet']['limits'] ):\n overviews['dataSet']['limits'][str(tile_z)] = {\n 'MinTileCol': MinTileCol,\n 'MinTileRow': MinTileRow,\n 'MaxTileCol': MaxTileCol,\n 'MaxTileRow': MaxTileRow,\n }\n\n else:\n if MinTileCol < overviews['dataSet']['limits'][str(tile_z)]['MinTileCol']:\n overviews['dataSet']['limits'][str(tile_z)]['MinTileCol'] = MinTileCol\n if MinTileRow < overviews['dataSet']['limits'][str(tile_z)]['MinTileRow']:\n overviews['dataSet']['limits'][str(tile_z)]['MinTileRow'] = MinTileRow\n if MaxTileCol > overviews['dataSet']['limits'][str(tile_z)]['MaxTileCol']:\n overviews['dataSet']['limits'][str(tile_z)]['MaxTileCol'] = MaxTileCol\n if MaxTileRow > overviews['dataSet']['limits'][str(tile_z)]['MaxTileRow']:\n overviews['dataSet']['limits'][str(tile_z)]['MaxTileRow'] = MaxTileRow\n\n for tile_x in range(MinTileCol, MaxTileCol + 1): \n for tile_y in range(MinTileRow, MaxTileRow + 1):\n # on cree une image 3 canaux pour la tuile\n opi = create_blank_tile(overviews, {'x': tile_x, 'y': tile_y, 'resolution': resolution}, 3, out_raster_srs)\n # on reech l'OPI dans cette image\n gdal.Warp(opi, input_image)\n # si necessaire on cree le dossier de la tuile\n tile_dir = args.cache+'/'+str(tile_z)+'/'+str(tile_y)+'/'+str(tile_x)\n Path(tile_dir).mkdir(parents=True, exist_ok=True)\n # on export en jpeg (todo: gerer le niveau de Q)\n 
PNG_DRIVER.CreateCopy(tile_dir+\"/\"+stem+\".png\", opi)\n # on cree une image mono canal pour la tuile\n mask = create_blank_tile(overviews, {'x': tile_x, 'y': tile_y, 'resolution': resolution}, 3, out_raster_srs)\n # on rasterise la partie du graphe qui concerne ce cliche\n gdal.Rasterize(mask, db_graph,\n SQLStatement='select geom from ' + args.table + ' where cliche = \\''+stem+'\\' ')\n img_mask = mask.GetRasterBand(1).ReadAsArray()\n # si le mask est vide, on a termine\n val_max = np.amax(img_mask)\n if val_max > 0:\n # on cree le graphe et l'ortho\n ortho = create_blank_tile(overviews, {'x': tile_x, 'y': tile_y, 'resolution': resolution}, 3, out_raster_srs)\n graph = create_blank_tile(overviews, {'x': tile_x, 'y': tile_y, 'resolution': resolution}, 3, out_raster_srs)\n if Path(tile_dir+\"/ortho.png\").is_file():\n existing_ortho = gdal.Open(tile_dir+\"/ortho.png\")\n existing_graph = gdal.Open(tile_dir+\"/graph.png\")\n else:\n existing_ortho = False\n existing_graph = False\n for i in range(3):\n opi_i = opi.GetRasterBand(i+1).ReadAsArray()\n if existing_ortho:\n ortho_i = existing_ortho.GetRasterBand(i+1).ReadAsArray()\n else:\n ortho_i = ortho.GetRasterBand(i+1).ReadAsArray()\n opi_i[(img_mask == 0)] = 0\n ortho_i[(img_mask != 0)] = 0\n ortho.GetRasterBand(i+1).WriteArray(np.add(opi_i, ortho_i))\n if existing_graph:\n graph_i = existing_graph.GetRasterBand(i+1).ReadAsArray()\n else:\n graph_i = graph.GetRasterBand(i+1).ReadAsArray()\n graph_i[(img_mask != 0)] = color[i]\n graph.GetRasterBand(i+1).WriteArray(graph_i)\n existing_ortho = None\n existing_graph = None\n PNG_DRIVER.CreateCopy(tile_dir+\"/ortho.png\", ortho)\n PNG_DRIVER.CreateCopy(tile_dir+\"/graph.png\", graph)",
"def blend(self, alignment_offset):\n stitched_image = None\n\n return stitched_image",
"def color_pixels(self, image, color):\r\n\r\n image[self.ally, self.allx] = color\r\n return image",
"def retrieve_pixel(self, x, y, index):\n pass",
"def _to_color(indx, base):\n base2 = base * base\n b = 2 - indx / base2\n r = 2 - (indx % base2) / base\n g = 2 - (indx % base2) % base\n return b * 127, r * 127, g * 127",
"def pixel(self, x: int, y: int, color: int):\n if (\n (x < self.size[0] and y < self.size[1]) and (x >= 0 and y >= 0)\n ):\n index, offset = self.position(x, y)\n self.image[index] = (\n self.image[index] & ~(0x01 << offset)\n ) | (\n (color != 0) << offset\n )\n else:\n return",
"def set_pixel(image, pt, color):\n\timage[pt[0], pt[1]] = color",
"def getPixel(self, px, py):\n if not self.inBounds(px,py):\n return IColor()\n idx = py*self.w + px\n return self.data[idx]",
"def _sample_to_pixel(self, y_sample, y2, h2, h_pixels):\n return (h_pixels - 1.) / h2 * (y_sample - y2)",
"def putpixel(self, col, row, color=GREEN):\n if col < 0 or row < 0:\n return\n try:\n self.vram[row][col] = color\n except IndexError:\n pass",
"def paint_pixel(game_map: GameMap, coord: Coordinate, strength: int):\n if not((0 <= coord[0] < game_map.size[0]) and (0 <= coord[1] < game_map.size[1])):\n return\n if TERRAIN[coord] == 'sea':\n return\n current = FOREST.get(coord, 0)\n FOREST[coord] = min(current + strength, 9)",
"def set_color(self):\n self.image[self.x, self.y] = self.color\n if self.diffusion:\n r = g = b = 0\n for i in range(self.convolution_matrix.shape[0]):\n for j in range(self.convolution_matrix.shape[1]):\n r = g = b = 0\n for k in range(self.convolution_matrix.shape[0]):\n for l in range(self.convolution_matrix.shape[1]):\n m = (self.x + i + k - 2 + self.image.shape[0]) % self.image.shape[0]\n n = (self.y + j + l - 2 + self.image.shape[1]) % self.image.shape[1]\n r += self.convolution_matrix[k][l] * self.image[m, n][2]\n g += self.convolution_matrix[k][l] * self.image[m, n][1]\n b += self.convolution_matrix[k][l] * self.image[m, n][0]\n self.image[self.x, self.y] = (b, g, r)",
"def get_pixel(self, frame: int, x: int, y: int) -> Color:\n return self.get_frame(frame).clone()[x, y]",
"def set_pixel(self, framebuf, x, y, color):\n index = (y * framebuf.stride + x) * 2\n framebuf.buf[index : index + 2] = self.color_to_rgb565(color)"
] | [
"0.5802858",
"0.5658782",
"0.54832906",
"0.54623395",
"0.5383574",
"0.53326017",
"0.5214478",
"0.520735",
"0.5191128",
"0.51251477",
"0.50873256",
"0.5066412",
"0.49981758",
"0.49971747",
"0.49680513",
"0.49348786",
"0.49256635",
"0.4886838",
"0.48779047",
"0.48769373",
"0.48626304",
"0.48603973",
"0.48523787",
"0.48455152",
"0.484396",
"0.48390007",
"0.48245782",
"0.4799429",
"0.47968972",
"0.47873884"
] | 0.74728173 | 0 |
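
Editor's note: the channel_blend negative listed above pastes a logo into a source image by selecting only the pixels whose alpha channel exceeds a small threshold and clamping the shifted indices to the image border. A minimal stand-alone sketch of that alpha-threshold paste, assuming plain NumPy uint8 arrays (function and variable names here are illustrative, not taken from the dataset):

import numpy as np

def naive_alpha_paste(dst, logo_rgba, x, y, alpha_thresh=15):
    # Select logo pixels that are sufficiently opaque.
    ys, xs = np.where(logo_rgba[:, :, 3] > alpha_thresh)
    # Shift to the paste position and clamp at the destination border.
    h, w = dst.shape[:2]
    dst_y = np.clip(ys + y, 0, h - 1)
    dst_x = np.clip(xs + x, 0, w - 1)
    # Copy the RGB values of the selected pixels only.
    dst[dst_y, dst_x] = logo_rgba[ys, xs, :3]
    return dst

canvas = np.zeros((64, 64, 3), dtype=np.uint8)
logo = np.zeros((8, 8, 4), dtype=np.uint8)
logo[2:6, 2:6] = (255, 0, 0, 255)            # opaque red square
out = naive_alpha_paste(canvas, logo, x=30, y=30)
print(out[32:36, 32:36, 0].min())            # 255: the square was pasted

The weighted and Poisson variants in the same snippet replace the plain copy with a per-pixel blend, but the index bookkeeping is the same.
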
Returns a matrix containing the weighted squared difference of the pixel values between each window in the source Image and the window extracted from the output Image at the specified Point with the given padding. | def __make_distance_matrix(self, src_image: Image, out_image: Image, padding: int, out_point: Point, causal: bool) -> np.ndarray:
        # Extract the reference window and the filled mask for the neighbourhood matching.
out_window = out_image.extract(out_point, padding, 'wrap')
out_filled = out_image.filled(out_point, padding, 'wrap', causal)
# Construct a 2D Gaussian kernel that matches the padding size.
gaussian_1D = signal.gaussian(2 * padding + 1, std=padding)
gaussian_2D = np.outer(gaussian_1D, gaussian_1D)
gaussian_2X = np.extract(out_filled, gaussian_2D)
# Return the weighted squared difference of each neighbourhood in the
# source Image with respect to the reference window.
return self._apply_distance_filter(src_image, out_window, out_filled, gaussian_2X) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def padding(self):\n pad = self.ntiles - self.windowsize\n return (int((pad - 1)/2.), int((pad + 1)/2.))",
"def padding(self):\n\t\treturn self.paddings_shape_param('W')",
"def convolution(img, kernel, padding=True):\n result = np.zeros_like(img)\n p_size_i = kernel.shape[0] // 2\n p_size_j = kernel.shape[1] // 2\n\n if padding:\n padded_img = np.zeros((img.shape[0] + 2 * p_size_i, img.shape[1] + 2 * p_size_j))\n i_first = p_size_i\n i_last = padded_img.shape[0] - p_size_i - 1\n j_first = p_size_j\n j_last = padded_img.shape[1] - p_size_j - 1\n padded_img[i_first: i_last + 1, j_first: j_last + 1] = img\n else:\n padded_img = img.copy()\n i_first = p_size_i\n i_last = padded_img.shape[0] - p_size_i - 1\n j_first = p_size_j\n j_last = padded_img.shape[1] - p_size_j - 1\n \n for i in range(i_first, i_last):\n for j in range(j_first, j_last):\n window = padded_img[i - p_size_i: i + p_size_i + 1, j - p_size_j: j + p_size_j + 1]\n res_pix = np.sum(window * kernel)\n result[i - p_size_i, j - p_size_j] = res_pix\n return result",
"def pad_image(input_img, window_size, padding_mode='symmetric'):\n assert np.isscalar(window_size)\n assert window_size % 2 == 1\n\n # Padding width must be window_size-1 and divided by 2. So that we can check every pixels\n pad_width = int((window_size-1)/2)\n # For each padding_mode, pad differently\n\n # But in result, I chose symmetric cause it seems to have smallest aepe\n if padding_mode == 'symmetric':\n padded_img = np.pad(input_img, pad_width, padding_mode)\n elif padding_mode == 'reflect':\n padded_img = np.pad(input_img, pad_width, padding_mode)\n elif padding_mode == 'constant':\n padded_img = np.pad(input_img, pad_width, padding_mode)\n\n return padded_img",
"def pad_edges(self, pad):\n weights=[]\n for dim, xy in zip([0, 1], [self.x, self.y]):\n xy0 = np.mean(xy)\n W = xy[-1]-xy[0]\n dist = np.abs(xy-xy0)\n wt=np.ones_like(dist)\n wt[ dist >= W/2 - pad] = 0\n weights += [wt]\n self.weight *= weights[0][:,None].dot(weights[1][None,:])",
"def conv_backward(dZ, A_prev, W, b, padding=\"same\", stride=(1, 1)):\n (m, h_prev, w_prev, c_prev) = A_prev.shape\n (m, h_new, w_new, c_new) = dZ.shape\n (kh, kw, c_prev, c_new) = W.shape\n sh, sw = stride\n if padding == 'same':\n ph = int(np.ceil((((h_prev - 1) * sh + kh - h_prev) / 2)))\n pw = int(np.ceil((((w_prev - 1) * sw + kw - w_prev) / 2)))\n if padding == 'valid':\n pw = 0\n ph = 0\n dA_prev = np.zeros(A_prev.shape)\n dW = np.zeros(W.shape)\n db = np.sum(dZ, axis=(0, 1, 2), keepdims=True)\n A_prev_pad = np.pad(A_prev, pad_width=((0, 0), (ph, ph), (pw, pw),\n (0, 0)), mode='constant')\n dA_prev_pad = np.pad(dA_prev, pad_width=((0, 0), (ph, ph), (pw, pw),\n (0, 0)), mode='constant')\n for i in range(m):\n a_prev_pad = A_prev_pad[i]\n da_prev_pad = dA_prev_pad[i]\n for h in range(h_new):\n for w in range(w_new):\n for c in range(c_new):\n v_beg = h * sh\n v_end = v_beg + kh\n h_start = w * sw\n h_end = h_start + kw\n a_slice = a_prev_pad[v_beg:v_end, h_start:h_end]\n da_prev_pad[v_beg:v_end,\n h_start:h_end] += \\\n W[:, :, :, c] * dZ[i, h, w, c]\n dW[:, :, :, c] += a_slice * dZ[i, h, w, c]\n\n if padding == 'same':\n dA_prev[i, :, :, :] += da_prev_pad[ph:-ph, pw:-pw, :]\n if padding == 'valid':\n dA_prev[i, :, :, :] += da_prev_pad\n\n return dA_prev, dW, db",
"def windowing(input):\n return input * hamming(input.shape[1], sym=0)",
"def calculateWindow(image, coord, window_size):\r\n image_shape = np.shape(image)\r\n sz = (window_size-1)//2\r\n x_list = np.arange(coord[0]-sz, coord[0]+sz+1, 1)\r\n y_list = np.arange(coord[1]-sz, coord[1]+sz+1, 1)\r\n xs = [x for x in x_list if (x >= 0) and (x < image_shape[0])]\r\n ys = [y for y in y_list if (y >= 0) and (y < image_shape[1])]\r\n x1 = np.min(xs)\r\n x2 = np.max(xs)\r\n y1 = np.min(ys)\r\n y2 = np.max(ys)\r\n return image[x1:x2+1, y1:y2+1]",
"def dwindow(window):\r\n \r\n h=window\r\n nh=len(h)\r\n lh=(nh-1)/2\r\n stepheight=(h[0]+h[-1])/2.\r\n ramp=float((h[-1]-h[0]))/nh\r\n h2=np.zeros(nh+2)\r\n h2[1:nh+1]=h-stepheight-ramp*np.arange(start=-lh,stop=lh+1,step=1)\r\n \r\n dwin=(h2[2:nh+2]-h2[0:nh])/2.+ramp\r\n dwin[0]=dwin[0]+stepheight\r\n dwin[-1]=dwin[-1]-stepheight\r\n \r\n return dwin",
"def smdm_normalize(images, window, padding, name=\"unnamed_smdm_normalize\"):\n\tMEDIAN_JITTER = tf.constant(1e-8)\n\t\n\tif window % 2 == 0:\n\t\traise ValueError(\"attempted to smdm_normalize() with even-sized window\")\n\n\timages = tf.cast(images, tf.float32)\n\tbatch_size, height, width, channels = tf.shape(images)[0], tf.shape(images)[1], tf.shape(images)[2], tf.shape(images)[3]\n\n\tspatial_last = tf.transpose(images, (0, 3, 1, 2))\n\tspatial_last_and_flat = tf.reshape(spatial_last, (batch_size, channels, -1))\n\tn = tf.multiply(height, width)\n\tk = tf.to_int32(tf.divide(n, 2)) + 1\n\ttop_k = tf.nn.top_k(spatial_last_and_flat, k, name=name + \"_top_half_of_images\")[0]\n\tmedians_spatial_last_and_flat = tf.cond(\n\t\ttf.equal(tf.mod(n, 2), 0),\n\t\tlambda: tf.reduce_mean(top_k[:, :, k - 2: k], -1, keep_dims=True),\n\t\tlambda: top_k[:, :, k - 1]\n\t)\n\tmedians_spatial_last_and_flat = tf.add(\n\t\tmedians_spatial_last_and_flat,\n\t\ttf.fill(tf.shape(medians_spatial_last_and_flat), MEDIAN_JITTER)\n\t)\n\tmedians_spatial_last = tf.expand_dims(medians_spatial_last_and_flat, 3)\n\tmedians = tf.transpose(medians_spatial_last, (0, 2, 3, 1))\n\timages = tf.divide(images, medians, name=name + \"_divide_images_by_medians\")\n\n\tpadding_amount = int((window - 1) / 2)\n\tpadding_amounts = ((0, 0), (padding_amount, padding_amount), (padding_amount, padding_amount), (0, 0))\n\timages_padded = tf.pad(images, padding_amounts, padding)\n\tlocal_means = tf.nn.pool(images_padded, (window, window), \"AVG\", \"VALID\", name=name + \"_local_means_of_images\")\n\timages = tf.subtract(images, local_means, name=name + \"_subtract_local_means_from_images\")\n\n\treturn images",
"def _extract_samples_with_padding(image, label):\n if label[I_INDEX] < 0:\n image = util.pad(image, ((label[I_INDEX] * -1, 0), (0, 0), (0, 0)), 'constant')\n label[I_INDEX] = 0\n if label[J_INDEX] < 0:\n image = util.pad(image, ((0, 0), (label[2] * -1, 0), (0, 0)), 'constant')\n label[J_INDEX] = 0\n if label[I_INDEX] + label[HEIGHT_INDEX] > image.shape[0]:\n image = util.pad(image, ((image.shape[0] + label[I_INDEX] + label[HEIGHT_INDEX], 0), (0, 0), (0, 0)),\n 'constant')\n if label[I_INDEX] < 0:\n image = util.pad(image, ((0, 0), (0, image.shape[0] + label[J_INDEX] + label[WIDTH_INDEX]), (0, 0)), 'constant')\n\n return image[label[I_INDEX]:label[I_INDEX] + label[HEIGHT_INDEX],\n label[J_INDEX]:label[J_INDEX] + label[WIDTH_INDEX]]",
"def make_padding(kernel_size, stride, dilation):\n return -((-kernel_size - (kernel_size - 1) * (dilation - 1)) // stride + 1) // 2",
"def calculate_weights(self):\n weights = []\n for x in range(frame.rad, frame.window_x, frame.dx):\n for y in range(frame.rad, frame.window_y, frame.dy):\n obj = new_frame.create_general_object(x,y)\n hist = new_frame.create_general_hist(obj)\n # compare histograms to find weight\n weight = cv2.compareHist(frame.hist, hist, method=cv2.cv.CV_COMP_CORREL)\n # find distance away from old point, and normalize by max distance\n max_distance = float(self.find_hypotenuse(frame.window_x, frame.window_y))\n distance = self.find_hypotenuse(x-frame.x, y-frame.y) / max_distance\n # subtract normalized distance from weight\n weight = weight - distance\n # make sure no weights are negative\n if weight < 0:\n weight = 0\n # append weights to array\n weights.append(weight)\n self.weights = np.array(weights)",
"def find_w_size(self, name, output_width=30, min_w=10, max_w=75, step=5, offset=0, \n sum_=False, window=None):\n \n loss_arr = []\n val_loss_arr = []\n \n print(\"Finding best window size..\")\n print(f\"Model: {name}, output size: {output_width}\\n\")\n \n if window:\n \n self.create_train_test(name=name, f_size=window, offset=offset, output_width=output_width, sum_=sum_)\n model, loss, val_loss = get_model(name, self.trainX, self.trainY)\n \n else:\n \n for i in range(min_w, max_w, step):\n \n self.create_train_test(name=name, f_size=i, offset=offset, output_width=output_width, sum_=sum_)\n model, loss, val_loss = get_model(name, self.trainX, self.trainY)\n \n print(f\"For window of {i} values, MAPE = {loss}\")\n loss_arr.append(loss)\n val_loss_arr.append(val_loss)\n \n temp = np.insert(val_loss_arr, 0, val_loss_arr[0])\n temp = np.append(temp, val_loss_arr[-1])\n \n smooth = np.convolve(temp, [1, 2, 1], mode='valid')\n \n if (len(smooth)-np.argmin(smooth)) > 4:\n break\n \n print(\"Done\")\n \n val_loss_arr = np.insert(val_loss_arr, 0, val_loss_arr[0])\n val_loss_arr = np.append(val_loss_arr, val_loss_arr[-1])\n val_loss_arr_smooth = np.convolve(val_loss_arr, [1, 2, 1], mode='valid') \n \n idx = np.argmin(val_loss_arr_smooth)\n \n window_size = range(min_w, max_w, step)[idx]\n \n range_ = range(min_w, max_w, step)[:len(loss_arr)]\n plt.plot(range_, loss_arr, label=\"loss\", color=\"#33638DFF\")\n plt.plot(range_, val_loss_arr[1:-1], label=\"val_loss\", color=\"#3CBB75FF\")\n plt.plot(range_, val_loss_arr_smooth/4, \n label=\"smooth_val_loss\", color=\"#d18756\")\n \n plt.axvline(x=window_size, linestyle=\"--\", c=\"black\", lw=1)\n plt.legend()\n plt.title(name + \" model\")\n plt.xlabel(\"window size\")\n plt.ylabel(\"loss\")\n plt.show()\n \n print(f\"Best window size for {name} is {window_size}\\n\")\n\n return window_size",
"def __padding(self, image, boxes, height, width):\n temp = boxes[:, :4].astype(np.int)\n y1 = np.where(temp[:, 0] < 0)[0]\n if len(y1) > 0:\n temp[y1, 0] = 0\n x1 = np.where(temp[:, 1] < 0)[0]\n if len(x1) > 0:\n temp[x1, 0] = 0\n y2 = np.where(temp[:, 2] > image.shape[0] - 1)[0]\n if len(y2) > 0:\n temp[y2, 0] = image.shape[0] - 1\n x2 = np.where(temp[:, 3] > image.shape[1] - 1)[0]\n if len(x2) > 0:\n temp[x2, 0] = image.shape[1] - 1\n pad_top = np.abs(temp[:, 0] - boxes[:, 0]).astype(np.int)\n pad_left = np.abs(temp[:, 1] - boxes[:, 1]).astype(np.int)\n pad_bottom = np.abs(temp[:, 2] - boxes[:, 2]).astype(np.int)\n pad_right = np.abs(temp[:, 3] - boxes[:, 3]).astype(np.int)\n input_data = np.empty([boxes.shape[0], 3, height, width], dtype=np.float32)\n for i in range(boxes.shape[0]):\n crop_img = image[temp[i, 0]:temp[i, 2] + 1, temp[i, 1]:temp[i, 3] + 1, :]\n crop_img = cv2.copyMakeBorder(crop_img, pad_top[i], pad_bottom[i], \\\n pad_left[i], pad_right[i], cv2.BORDER_CONSTANT, value=0)\n if crop_img is None:\n continue\n crop_img = cv2.resize(crop_img, (width, height)).astype(np.float32)\n crop_img[:, :, 0] -= self.mean[0]\n crop_img[:, :, 1] -= self.mean[1]\n crop_img[:, :, 2] -= self.mean[2]\n crop_img *= self.scale_factor\n crop_img = np.transpose(crop_img, (2, 0, 1))\n input_data[i] = crop_img.copy()\n return input_data",
"def padding(self) -> Tuple[int, int, int, int]:\n return (self.ipadding[0].to_pixels(self.width),\n self.ipadding[1].to_pixels(self.width),\n self.ipadding[2].to_pixels(self.height),\n self.ipadding[3].to_pixels(self.height))",
"def conv2d(x, W, strides=(1, 1, 1, 1), padding='SAME', dilation_rate=(1, 1), name='conv2d'):\n x_shape = x.get_shape().as_list()\n x_shape = [s if isinstance(s, int) else -1 for s in x_shape]\n W_shape = W.get_shape().as_list()\n padding_x = None\n padding_y = None\n \n if padding == \"ZEROPAD\":\n if len(x_shape) == 5:\n s = strides[1:3]\n i = (int(x_shape[2] / s[0]), int(x_shape[3] / s[1]))\n elif len(x_shape) == 4:\n s = strides[1:3]\n i = (int(x_shape[1] / s[0]), int(x_shape[2] / s[1]))\n else:\n raise ValueError(\"invalid input shape\")\n # --\n kernel_x = W_shape[0]\n kernel_y = W_shape[1]\n padding_x = int(np.ceil((i[0] - s[0] - i[0] + kernel_x + (kernel_x - 1) * (dilation_rate[0] - 1)) / (s[0] * 2)))\n padding_y = int(np.ceil((i[1] - s[1] - i[1] + kernel_y + (kernel_y - 1) * (dilation_rate[1] - 1)) / (s[1] * 2)))\n elif (isinstance(padding, list) or isinstance(padding, tuple)) and len(padding) == 2:\n padding_x = padding[0]\n padding_y = padding[1]\n \n if padding_x is not None and padding_y is not None:\n if len(x_shape) == 5:\n pad = [[0, 0], [0, 0], [padding_x, padding_x], [padding_y, padding_y], [0, 0]]\n elif len(x_shape) == 4:\n pad = [[0, 0], [padding_x, padding_x], [padding_y, padding_y], [0, 0]]\n \n # pad input with zeros\n x = tf.pad(x, pad, \"CONSTANT\")\n # set padding method for convolutions to valid to not add additional padding\n padding = \"VALID\"\n elif padding not in (\"SAME\", \"VALID\"):\n raise ValueError(\"unsupported padding type\")\n \n if dilation_rate == (1, 1):\n def conv_fct(inp):\n return tf.nn.conv2d(input=inp, filter=W, padding=padding, strides=strides, name=name)\n else:\n if (strides[0] != 1) or (strides[-1] != 1):\n raise AttributeError(\"Striding in combination with dilation is only possible along the spatial dimensions,\"\n \"i.e. strides[0] and strides[-1] have to be 1.\")\n \n def conv_fct(inp):\n return tf.nn.convolution(input=inp, filter=W, dilation_rate=dilation_rate,\n padding=padding, strides=strides[1:3], name=name)\n \n # Flatten matrix in first dimensions if necessary (join samples and sequence positions)\n with tf.variable_scope(name):\n if len(x_shape) > 4:\n x_shape = [s if isinstance(s, int) else -1 for s in x.get_shape().as_list()]\n if x_shape[0] == -1 or x_shape[1] == -1:\n x_flat = tf.reshape(x, [-1] + x_shape[2:])\n else:\n x_flat = tf.reshape(x, [x_shape[0] * x_shape[1]] + x_shape[2:])\n conv = conv_fct(x_flat)\n conv = tf.reshape(conv, x_shape[:2] + conv.get_shape().as_list()[1:])\n else:\n conv = conv_fct(x)\n return conv",
"def roll(self, image):\n\n\t\toutput = np.zeros(image.shape)\n\n\t\theight, width = image.shape\n\n\t\tfor y in range(height):\n\n\t\t\t# Getting available neighbour indexes in y direction.\n\t\t\tdelta_y_0 = abs(min(y - self.padding, 0))\n\t\t\tdelta_y_1 = min( height - 1 - y, self.padding) + self.padding + 1\n\n\t\t\tfor x in range(width):\n\n\t\t\t\t# Getting available neighbour indexes in x direction.\n\t\t\t\tdelta_x_0 = abs( min(x - self.padding, 0)) \n\t\t\t\tdelta_x_1 = min( width - 1 - x, self.padding) + self.padding + 1\n\n\t\t\t\t# Taking a grid of pixels from the image.\n\t\t\t\tgrid = image[ \n\t\t\t\t\ty - (self.padding - delta_y_0) : y + (delta_y_1 - self.padding),\n\t\t\t\t\tx - (self.padding - delta_x_0) : x + (delta_x_1 - self.padding)\n\t\t\t\t]\n\n\t\t\t\tpixel = self.apply(grid, slice(delta_x_0, delta_x_1 ), slice(delta_y_0,delta_y_1))\n\t\t\t\t\n\t\t\t\toutput[y, x] = pixel\n\n\t\treturn output",
"def apply(filter, image, MinPad=True, pad=True):\n\n #Just for comfort:\n FFt = fft2\n iFFt = ifft2\n\n #The size of the images:\n r2 = image.size[0]\n c2 = image.size[1]\n r1 = len(filter)\n c1 = len(filter[0])\n \n #MinPad results simpler padding,smaller images:\n if MinPad:\n r = r1+r2\n c = c1+c2\n else:\n #if the Numerical Recipies says so:\n r = 2*max(r1,r2)\n c = 2*max(c1,c2)\n\n #For nice FFT, we need the power of 2:\n if pad:\n pr2 = int(math.log(r)/math.log(2.0) + 1.0 )\n pc2 = int(math.log(c)/math.log(2.0) + 1.0 )\n rOrig = r\n cOrig = c\n r = 2**pr2\n c = 2**pc2\n #end of if pad\n \n #numpy fft has the padding built in, which can save us some steps\n #here. The thing is the s(hape) parameter:\n fftimage = FFt(filter, s=(r,c))*FFt(image,s=(r,c))\n\n if pad:\n result = (iFFt(fftimage))[:rOrig,:cOrig].real\n else:\n result = (iFFt(fftimage)).real\n\n resultImage = Image.new(\"L\", (r2, c2))\n resultCanvas = resultImage.load()\n valueMax = result.max()\n valueMin = result.min()\n \n for x in xrange(0, r2):\n for y in xrange(0, c2):\n resultCanvas[y, x] = (result[x, y] - valueMin) * 255 / (valueMax - valueMin)\n return resultImage",
"def calc_pad_dims_2D(X_shape, out_dim, kernel_shape, stride, dilation=0):\n\tif not isinstance(X_shape, tuple):\n\t\traise ValueError(\"X_shape must be of type tuple\")\n\n\tif not isinstance(out_dim, tuple):\n\t\traise ValueError(\"out_dim must be of type tuple\")\n\n\tif not isinstance(kernel_shape, tuple):\n\t\traise ValueError(\"kernel_shape must be of type tuple\")\n\n\tif not isinstance(stride, int):\n\t\traise ValueError(\"stride must be of type int\")\n\n\td = dilation\n\tfr, fc = kernel_shape\n\tout_rows, out_cols = out_dim\n\tn_ex, in_rows, in_cols, in_ch = X_shape\n\n\t# update effective filter shape based on dilaltion factor\n\t_fr, _fc = fr * (d + 1) - d, fc * (d + 1) - d\n\tpr = int((stride * (out_rows - 1) + _fr - in_rows) / 2)\n\tpc = int((stride * (out_cols - 1) + _fc - in_cols) / 2)\n\tout_rows1 = int(1 + (in_rows + 2 * pr - _fr) / stride)\n\tout_cols1 = int(1 + (in_cols + 2 * pc - _fc) / stride)\n\n\t# add asymmetric padding pixels to right/bottom\n\tpr1, pr2 = pr, pr\n\tif out_rows1 == out_rows - 1:\n\t\tpr1, pr2 = pr, pr+1\n\telif out_rows1 != out_rows:\n\t\traise AssertionError\n\n\tif any(np.array([pr1, pr2, pc1, pc2]) < 0):\n\t\traise ValueError(\n\t\t\t\"padding cannot be less than 0. Get: {}\".format((pr1, pr2, pc1, pc2))\n\t\t\t)\n\treturn (pr1, pr2, pc1, pc2)",
"def __calc_padding(self, input_shape, kernel_size, stride=1):\n # default of pytorch for input_size = (C_in, H_in, W_in)\n if len(input_shape) == 3:\n if stride != (1,1):\n raise ValueError(\"calc padding only works for stride=(1,1)\")\n padding = (0,0)\n if kernel_size[0]%2 == 0 or kernel_size[1]%2 == 0:\n raise ValueError(\"the kernel size: {} is incompatible with CnnHighway. With this kernel, the conv output shape will not equal the input shape\".format(kernel_size))\n padding_height = int((kernel_size[0] - 1)/2)\n padding_width = int((kernel_size[1] - 1)/2)\n return (padding_height, padding_width)\n if len(input_shape) == 2:\n if stride != 1:\n raise ValueError(\"calc padding only works for stride=(1)\")\n padding = int((kernel_size -1)/2)\n return padding",
"def conv2D_naive(X, W, stride, pad, dilation=0):\n\ts, d = stride, dilation\n\tX_pad, p = pad2D(X, pad, W.shape[:2], stride=s, dilation=d)\n\n\tpr1, pr2, pc1, pc2 = p\n\tfr, fc, in_ch, out_ch = W.shape\n\tn_ex, in_rows, in_cols, in_ch = X.shape\n\n\t# update effective filter shape based on dilation factor\n\tfr, fc = fr * (d + 1) - d, fc * (d + 1) - d\n\n\tout_rows = int((in_rows + pr1 + pr2 - fr) / s + 1)\n\tout_cols = int((in_cols + pc1 + pc2 - fc) / s + 1)\n\n\tZ = np.zeros((n_ex, out_rows, out_cols, out_ch))\n\tfor m in range(n_ex):\n\t\tfor c in range(out_ch):\n\t\t\tfor i in range(out_rows):\n\t\t\t\tfor j in range(out_cols):\n\t\t\t\t\ti0, i1 = i * s, (i * s) + fr\n\t\t\t\t\tj0, j1 = j * s, (j * s) + fc\n\n\t\t\t\t\twindow = X_pad[m, i0 : i1 : (d + 1), j0 : j1 : (d + 1), :]\n\t\t\t\t\tZ[m, i, j, c] = np.sum(window * W[:, :, :, c])\n\treturn Z",
"def conv2D(X, W, stride, pad, dilation=0):\n\ts, d = stride, dilation\n\t_, p = pad2D(X, pad, W.shape[:2], s, dilation=dilation)\n\n\tpr1, pr2, pc1, pc2 = p\n\tfr, fc, in_ch, out_ch = W.shape\n\tn_ex, in_rows, in_cols, in_ch = X.shape\n\n\t# update effective filter shape based on dilation factor\n\t_fr, _fc = fr * (d + 1) - d, fc * (d + 1) - d\n\n\t# compute the dimensions of the covolution ouput\n\tout_rows = int((in_rows + pr1 + pr2 - _fr) / s + 1 )\n\tout_cols = int((in_cols + pc1 + pc2 - _fc) / s + 1 )\n\n\t# convert X and W into the appropriate 2D matrix and take their product\n\tX_col, _ = im2col(X, W.shape, p, s, d)\n\tW_col = W.transpose(3, 2, 0, 1).reshape(out_ch, -1)\n\n\tZ = (W_col @ X_col).reshape(out_ch, out_rows, out_cols, n_ex).reshape(3,1,2,0)\n\treturn Z",
"def calc_pad_dims_2D(X_shape, out_dim, kernel_shape, stride, dilation=0):\n if not isinstance(X_shape, tuple):\n raise ValueError(\"`X_shape` must be of type tuple\")\n\n if not isinstance(out_dim, tuple):\n raise ValueError(\"`out_dim` must be of type tuple\")\n\n if not isinstance(kernel_shape, tuple):\n raise ValueError(\"`kernel_shape` must be of type tuple\")\n\n if not isinstance(stride, int):\n raise ValueError(\"`stride` must be of type int\")\n\n d = dilation\n fr, fc = kernel_shape\n out_rows, out_cols = out_dim\n n_ex, in_rows, in_cols, in_ch = X_shape\n\n # update effective filter shape based on dilation factor\n _fr, _fc = fr * (d + 1) - d, fc * (d + 1) - d\n\n pr = int((stride * (out_rows - 1) + _fr - in_rows) / 2)\n pc = int((stride * (out_cols - 1) + _fc - in_cols) / 2)\n\n out_rows1 = int(1 + (in_rows + 2 * pr - _fr) / stride)\n out_cols1 = int(1 + (in_cols + 2 * pc - _fc) / stride)\n\n # add asymmetric padding pixels to right / bottom\n pr1, pr2 = pr, pr\n if out_rows1 == out_rows - 1:\n pr1, pr2 = pr, pr + 1\n elif out_rows1 != out_rows:\n raise AssertionError\n\n pc1, pc2 = pc, pc\n if out_cols1 == out_cols - 1:\n pc1, pc2 = pc, pc + 1\n elif out_cols1 != out_cols:\n raise AssertionError\n\n if any(np.array([pr1, pr2, pc1, pc2]) < 0):\n raise ValueError(\n \"Padding cannot be less than 0. Got: {}\".format((pr1, pr2, pc1, pc2))\n )\n return (pr1, pr2, pc1, pc2)",
"def sliding_window_decoding( model, X, input_shape, overlapping = 32 ) :\n patch_bboxes = get_patch_bboxes( X.shape, input_shape, overlapping )\n n_samples, n_chs, height, width = X.shape\n Z = np.zeros( X.shape, dtype = np.float32 )\n C = np.zeros( X.shape, dtype = np.float32 )\n pad_before, pad_after = min( input_shape ) // 4, min( input_shape ) // 4\n for top, bot, left, right in patch_bboxes :\n x = X[ :, :, top:bot, left:right ]\n z = model.predict( x )\n if ( top == 0 ) and ( bot == height ) :\n if ( left == 0 ) and ( right == width ):\n Z[:,:,top:bot,left:right] += z\n C[:,:,top:bot,left:right] += 1. \n elif ( left == 0 ) :\n Z[:,:,top:bot,left:right-pad_after] += z[:,:,:,:-pad_after]\n C[:,:,top:bot,left:right-pad_after] += 1.\n elif ( right == width ) :\n Z[:,:,top:bot,left+pad_before:right] += z[:,:,:,pad_before:]\n C[:,:,top:bot,left+pad_before:right] += 1.\n else :\n Z[:,:,top:bot,left+pad_before:right-pad_after] += z[:,:,:,pad_before:-pad_after]\n C[:,:,top:bot,left+pad_before:right-pad_after] += 1. \n elif ( top == 0 ) :\n if ( left == 0 ) and ( right == width ):\n Z[:,:,top:bot-pad_after,left:right] += z[:,:,:-pad_after,:]\n C[:,:,top:bot-pad_after,left:right] += 1.\n elif ( left == 0 ) :\n Z[:,:,top:bot-pad_after,left:right-pad_after] += z[:,:,:-pad_after,:-pad_after]\n C[:,:,top:bot-pad_after,left:right-pad_after] += 1. \n elif ( right == width ) :\n Z[:,:,top:bot-pad_after,left+pad_before:right] += z[:,:,:-pad_after,pad_before:]\n C[:,:,top:bot-pad_after,left+pad_before:right] += 1.\n else :\n Z[:,:,top:bot-pad_after,left+pad_before:right-pad_after] += z[:,:,:-pad_after,pad_before:-pad_after]\n C[:,:,top:bot-pad_after,left+pad_before:right-pad_after] += 1.\n elif ( bot == height ) :\n if ( left == 0 ) and ( right == width ):\n Z[:,:,top+pad_before:bot,left:right] += z[:,:,pad_before:,:]\n C[:,:,top+pad_before:bot,left:right] += 1.\n elif ( left == 0 ) :\n Z[:,:,top+pad_before:bot,left:right-pad_after] += z[:,:,pad_before:,:-pad_after]\n C[:,:,top+pad_before:bot,left:right-pad_after] += 1.\n elif ( right == width ) :\n Z[:,:,top+pad_before:bot,left+pad_before:right] += z[:,:,pad_before:,pad_before:]\n C[:,:,top+pad_before:bot,left+pad_before:right] += 1.\n else :\n Z[:,:,top+pad_before:bot,left+pad_before:right-pad_after] += z[:,:,pad_before:,pad_before:-pad_after]\n C[:,:,top+pad_before:bot,left+pad_before:right-pad_after] += 1.\n else :\n if ( left == 0 ) and ( right == width ) :\n Z[:,:,top+pad_before:bot-pad_after,left:right] += z[:,:,pad_before:-pad_after,:]\n C[:,:,top+pad_before:bot-pad_after,left:right] += 1.\n elif ( left == 0 ) :\n Z[:,:,top+pad_before:bot-pad_after,left:right-pad_after] += z[:,:,pad_before:-pad_after,:-pad_after]\n C[:,:,top+pad_before:bot-pad_after,left:right-pad_after] += 1. \n elif ( right == width ) :\n Z[:,:,top+pad_before:bot-pad_after,left+pad_before:right] += z[:,:,pad_before:-pad_after,pad_before:]\n C[:,:,top+pad_before:bot-pad_after,left+pad_before:right] += 1.\n else :\n Z[:,:,top+pad_before:bot-pad_after,left+pad_before:right-pad_after] += z[:,:,pad_before:-pad_after,pad_before:-pad_after]\n C[:,:,top+pad_before:bot-pad_after,left+pad_before:right-pad_after] += 1.\n return Z / C",
"def rolling_angular_difference(\n data: np.ndarray, fps: int, time_windows: np.ndarray\n ):\n\n data = np.deg2rad(data)\n results = np.full((data.shape[0], time_windows.shape[0]), 0.0)\n for time_window_cnt in prange(time_windows.shape[0]):\n window_size = int(time_windows[time_window_cnt] * fps)\n for window_end in prange(window_size, data.shape[0], 1):\n point_one, point_two = data[window_end - window_size], data[window_end]\n print(point_one, point_two)\n distance = np.pi - np.abs(np.pi - np.abs(point_one - point_two))\n results[window_end][time_window_cnt] = np.rad2deg(distance)\n\n return results",
"def padding(image, padded_size):\n image_row, image_col = image.shape #asigna alto y ancho de la imagen \n\n padded_image = np.zeros((image_row + padded_size*2, image_col + padded_size*2)) #matriz de imagen con padding en zeros\n print(\"Padded image zeros:\")\n print(padded_image)\n\n padded_image[padded_size:padded_size + image_row, padded_size:padded_size + image_col] = image #matriz de imagen con padding\n print(\"Padded image:\")\n print(padded_image)\n\n \n return padded_image",
"def calculate_w(dataframe_row, prev_w, prev_y, df_len):\n vector_w = prev_w + (prev_y / df_len * (dataframe_row - prev_y * prev_w))\n norm_vector_w = vector_w / np.linalg.norm(vector_w)\n return norm_vector_w.to_numpy()",
"def cut_window(image, window):\n \n return image[window[0][0]: window[0][1] + 1, window[1][0]: window[1][1] + 1].copy()",
"def _extract_weights(self,W):\n wl1_size = self._D*self._hidden_layer_size\n bl1_size = self._hidden_layer_size\n \n wl2_size = self._hidden_layer_size*self._output_size\n bl2_size = self._output_size\n\n \n weights_L1 = W[0:wl1_size].reshape((self._D,self._hidden_layer_size))\n bias_L1 = W[wl1_size:wl1_size+bl1_size]\n \n start_l2 = wl1_size+bl1_size\n\n weights_L2 = W[start_l2: start_l2 + wl2_size].reshape((self._hidden_layer_size,self._output_size))\n bias_L2 = W[start_l2 + wl2_size : start_l2 + wl2_size + bl2_size]\n \n \n \n return weights_L1,bias_L1,weights_L2,bias_L2"
] | [
"0.59613675",
"0.5644716",
"0.55131125",
"0.5483496",
"0.54455864",
"0.53608745",
"0.53575754",
"0.5356096",
"0.5319758",
"0.5300092",
"0.52018",
"0.5187523",
"0.5163065",
"0.5131752",
"0.5121635",
"0.50840497",
"0.50273937",
"0.500391",
"0.49987105",
"0.49745652",
"0.49571735",
"0.49540764",
"0.49253044",
"0.49207175",
"0.4918446",
"0.49138993",
"0.49004933",
"0.48802033",
"0.48745215",
"0.48553643"
] | 0.66690564 | 0 |
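
Editor's note: the row above pairs a docstring about a Gaussian-weighted squared-difference matrix with a __make_distance_matrix method that relies on project-specific Image and Point helpers. A self-contained sketch of the same idea on plain NumPy arrays, assuming a grayscale source image and an odd-sized reference window (all names below are illustrative):

import numpy as np
from scipy import signal

def weighted_ssd_map(src, ref_window):
    # Gaussian weights matched to the window size, normalised to sum to 1.
    k = ref_window.shape[0]                    # k x k window, k odd
    g1 = signal.windows.gaussian(k, std=max(k // 2, 1))
    weights = np.outer(g1, g1)
    weights /= weights.sum()
    # Weighted squared difference of every window position against the reference.
    h, w = src.shape
    out = np.empty((h - k + 1, w - k + 1))
    for i in range(out.shape[0]):
        for j in range(out.shape[1]):
            window = src[i:i + k, j:j + k]
            out[i, j] = np.sum(weights * (window - ref_window) ** 2)
    return out

rng = np.random.default_rng(0)
src = rng.random((32, 32))
ref = src[10:15, 10:15]                        # 5x5 patch taken from the image itself
d = weighted_ssd_map(src, ref)
print(d.shape, d[10, 10])                      # (28, 28) and a distance of ~0.0

The stored document additionally restricts the weights to the already-synthesised pixels, which is what its np.extract(out_filled, gaussian_2D) line does; the sketch omits that causal mask for brevity.
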
Updates the mouse_over variable and returns the button's action value when clicked. | def update(self, mouse_pos, mouse_up):
if self.rect.collidepoint(mouse_pos):
self.mouse_over = True
if mouse_up:
return self.action
else:
self.mouse_over = False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mouse_over(self):\n pass",
"def update_button_hover_status(self):\n for button in self.playing_buttons:\n button.update(self.mousePos)",
"def mouse_hover(self):\n self.color1 = self.color # Color changes\n position = pygame.mouse.get_pos() # Get mouse position\n if self.rect.collidepoint(position): # If the mouse is inside the button rect\n self.color1 = LIGHT_GREEN # Change color to light green",
"def _action_hovered(self, action):\n self._emit_signal_for_action(self.action_hovered, action)",
"def update(self, mouse_pos, mouse_up):\n if self.rect.collidepoint(mouse_pos):\n self.mouse_over = True\n if mouse_up:\n \n try:\n eval(self.action)\n except:\n running = False\n \n else:\n self.mouse_over = False",
"def OnLeftUp_ClickButton(self, event):\r\n \r\n self._hover_button = None\r\n\r\n if self._action_part:\r\n self.RefreshButton(self._action_part)\r\n\r\n # make sure we're still over the item that was originally clicked\r\n if self._action_part == self.HitTest(*event.GetPosition()):\r\n \r\n # fire button-click event\r\n e = AuiManagerEvent(wxEVT_AUI_PANE_BUTTON)\r\n e.SetManager(self)\r\n e.SetPane(self._action_part.pane)\r\n e.SetButton(self._action_part.button.button_id)\r\n self.ProcessMgrEvent(e)",
"def update(self):\n self.mousePos = pygame.mouse.get_pos()\n self.update_button_hover_status()",
"def clickedAction(self, events):\n print(\"The {} button was clicked!\".format(self.imgname))",
"def on_mouse_click(self, event):\n if not self.is_game_over:\n try:\n # i, j coordinates of the click event\n i = int(round(event.ydata))\n j = int(round(event.xdata))\n\n # Left button\n if event.button == 1 or event.button == 2:\n self.reveal(i, j)\n\n # Right button\n elif event.button == 3:\n self.flag(i, j)\n\n except (TypeError, IndexError):\n pass",
"def check_button_hover(coord, play_button, high_scores_button):\r\n x = coord[0]\r\n y = coord[1]\r\n play_x = (play_button.rect.x <= x <= play_button.rect.x + play_button.width)\r\n play_y = (play_button.rect.y <= y <= play_button.rect.y + play_button.height)\r\n scores_x = (high_scores_button.rect.x <= x <= high_scores_button.rect.x + high_scores_button.width)\r\n scores_y = (high_scores_button.rect.y <= y <= high_scores_button.rect.y + high_scores_button.height)\r\n if play_x and play_y:\r\n play_button.text_color = (0, 255, 0)\r\n else:\r\n play_button.text_color = (255, 255, 255)\r\n\r\n play_button.prep_msg()\r\n play_button.draw_button()\r\n\r\n if scores_x and scores_y:\r\n high_scores_button.text_color = (0, 255, 0)\r\n else:\r\n high_scores_button.text_color = (255, 255, 255)\r\n\r\n high_scores_button.prep_msg()\r\n high_scores_button.draw_button()",
"def on_hover(self) -> None:",
"def UpdateButtonOnScreen(self, button_ui_part, event):\r\n\r\n hit_test = self.HitTest(*event.GetPosition())\r\n\r\n if not hit_test or not button_ui_part:\r\n return\r\n \r\n state = AUI_BUTTON_STATE_NORMAL\r\n \r\n if hit_test == button_ui_part:\r\n if event.LeftDown():\r\n state = AUI_BUTTON_STATE_PRESSED\r\n else:\r\n state = AUI_BUTTON_STATE_HOVER\r\n else:\r\n if event.LeftDown():\r\n state = AUI_BUTTON_STATE_HOVER\r\n \r\n # now repaint the button with hover state\r\n cdc = wx.ClientDC(self._frame)\r\n\r\n # if the frame has a toolbar, the client area\r\n # origin will not be (0,0).\r\n pt = self._frame.GetClientAreaOrigin()\r\n if pt.x != 0 or pt.y != 0:\r\n cdc.SetDeviceOrigin(pt.x, pt.y)\r\n\r\n if hit_test.pane: \r\n self._art.DrawPaneButton(cdc, self._frame,\r\n button_ui_part.button.button_id,\r\n state,\r\n button_ui_part.rect, hit_test.pane)",
"def on_mouse_release(self, x, y, button):\n pass",
"def button2(msg,x,y,w,h,ic,ac,action=None): #de button die wordt gebruikt als je een onzichtbare button wilt\r\n mouse = pygame.mouse.get_pos()\r\n click = pygame.mouse.get_pressed()\r\n if x+w > mouse[0] > x and y+h > mouse[1] > y: #als de muis over de knop hovert\r\n\r\n if click[0] == 1 and action != None: #als je er op klikt, doe actie\r\n action()\r\n\r\n smallText = pygame.font.SysFont(\"freesansbold.ttf\",20)\r\n textSurf, textRect = text_objects(msg, smallText)\r\n textRect.center = ( (x+(w/2)), (y+(h/2)) )\r\n gameDisplay.blit(textSurf, textRect)",
"def on_mouse_press(self, x, y, button):\n\n pass",
"def button(msg,x,y,w,h,ic,ac,action=None): # dit is de function die een button aanmaakt (text,x,y,width,height,kleur, hover kleur, actie)\r\n mouse = pygame.mouse.get_pos()\r\n click = pygame.mouse.get_pressed()\r\n if x+w > mouse[0] > x and y+h > mouse[1] > y: #als de muis over de knop hovert, verander de kleur\r\n pygame.draw.rect(gameDisplay, ac,(x,y,w,h))\r\n if click[0] == 1 and action != None: #als je er op klikt, doe actie\r\n action()\r\n else:\r\n pygame.draw.rect(gameDisplay, ic,(x,y,w,h))\r\n smallText = pygame.font.SysFont(\"freesansbold.ttf\",20)\r\n textSurf, textRect = text_objects(msg, smallText)\r\n textRect.center = ( (x+(w/2)), (y+(h/2)) )\r\n gameDisplay.blit(textSurf, textRect)",
"def MouseOverItem(self,item):\r\n pass",
"def eventHandler(self, event: pygame.event):\n # change selected color if this button's rectangle was clicked\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n if self.rect.collidepoint(event.pos): # is mouse over button\n self.image = self._images[ButtonImages.CLICKING_IMAGE.value]\n self.beingClicked = True\n for func, *args in self.functionsToInvokeWhenClicked:\n func(*args)\n elif event.type == pygame.MOUSEBUTTONUP and self.beingClicked:\n if event.button == 1:\n self.beingClicked = False\n self.image = self._images[ButtonImages.DEFAULT_IMAGE.value]",
"def GetButton(self):\r\n\r\n return self.button",
"def check_button_hover(self, mouse_pos):\n for button in self.buttons: # type: Button\n if button.is_position_on_button(mouse_pos):\n button.hover()\n else:\n button.un_hover()",
"def _mouse_action(self, pos, pygame):\r\n surface = pygame.display.get_surface()\r\n\r\n width = surface.get_width()\r\n height = surface.get_height()\r\n # get window size\r\n\r\n button_width = width / 5\r\n button_height = height / 6\r\n # calculate button size\r\n\r\n pixel_x, pixel_y = pos\r\n # get user interact position\r\n\r\n # check which button that user interact\r\n # all the conditional statements deal with what the user selects\r\n # on the screen. There are 25 buttons and hence that many conditional\r\n # statements\r\n if 0 < pixel_x < button_width and\\\r\n button_height < pixel_y < 2 * button_height:\r\n return self.list[0]\r\n elif button_width < pixel_x < 2 * button_width and\\\r\n button_height < pixel_y < 2 * button_height:\r\n return self.list[1]\r\n elif 2 * button_width < pixel_x < 3 * button_width and\\\r\n button_height < pixel_y < 2 * button_height:\r\n return self.list[2]\r\n elif 3 * button_width < pixel_x < 4 * button_width and\\\r\n button_height < pixel_y < 2 * button_height:\r\n return self.list[3]\r\n elif 4 * button_width < pixel_x < 5 * button_width and\\\r\n button_height < pixel_y < 2 * button_height:\r\n return self.list[4]\r\n elif 0 < pixel_x < button_width and\\\r\n 2 * button_height < pixel_y < 3 * button_height:\r\n return self.list[5]\r\n elif button_width < pixel_x < 2 * button_width and\\\r\n 2 * button_height < pixel_y < 3 * button_height:\r\n return self.list[6]\r\n elif 2 * button_width < pixel_x < 3 * button_width and\\\r\n 2 * button_height < pixel_y < 3 * button_height:\r\n return self.list[7]\r\n elif 3 * button_width < pixel_x < 4 * button_width and\\\r\n 2 * button_height < pixel_y < 3 * button_height:\r\n return self.list[8]\r\n elif 4 * button_width < pixel_x < 5 * button_width and\\\r\n 2 * button_height < pixel_y < 3 * button_height:\r\n return self.list[9]\r\n elif 0 < pixel_x < button_width and\\\r\n 3 * button_height < pixel_y < 4 * button_height:\r\n return self.list[10]\r\n elif button_width < pixel_x < 2 * button_width and\\\r\n 3 * button_height < pixel_y < 4 * button_height:\r\n return self.list[11]\r\n elif 2 * button_width < pixel_x < 3 * button_width and\\\r\n 3 * button_height < pixel_y < 4 * button_height:\r\n return self.list[12]\r\n elif 3 * button_width < pixel_x < 4 * button_width and\\\r\n 3 * button_height < pixel_y < 4 * button_height:\r\n return self.list[13]\r\n elif 4 * button_width < pixel_x < 5 * button_width and\\\r\n 3 * button_height < pixel_y < 4 * button_height:\r\n return self.list[14]\r\n elif 0 < pixel_x < button_width and\\\r\n 4 * button_height < pixel_y < 5 * button_height:\r\n return self.list[15]\r\n elif button_width < pixel_x < 2 * button_width and\\\r\n 4 * button_height < pixel_y < 5 * button_height:\r\n return self.list[16]\r\n elif 2 * button_width < pixel_x < 3 * button_width and\\\r\n 4 * button_height < pixel_y < 5 * button_height:\r\n return self.list[17]\r\n elif 3 * button_width < pixel_x < 4 * button_width and\\\r\n 4 * button_height < pixel_y < 5 * button_height:\r\n return self.list[18]\r\n elif 4 * button_width < pixel_x < 5 * button_width and\\\r\n 4 * button_height < pixel_y < 5 * button_height:\r\n return self.list[19]\r\n elif 0 < pixel_x < button_width and\\\r\n 5 * button_height < pixel_y < 6 * button_height:\r\n return self.list[20]\r\n elif button_width < pixel_x < 2 * button_width and\\\r\n 5 * button_height < pixel_y < 6 * button_height:\r\n return self.list[21]\r\n elif 2 * button_width < pixel_x < 3 * button_width and\\\r\n 5 * button_height < pixel_y < 6 * 
button_height:\r\n return self.list[22]\r\n elif 3 * button_width < pixel_x < 4 * button_width and\\\r\n 5 * button_height < pixel_y < 6 * button_height:\r\n return self.list[23]\r\n elif 4 * button_width < pixel_x < 5 * button_width and\\\r\n 5 * button_height < pixel_y < 6 * button_height:\r\n return self.list[24]",
"def button(self):\n return self._button",
"def button(self):\n return self._button",
"def on_mouse_up(self, pos, mouse_button):\n for item in button.Button.all_buttons:\n if item.collidepoint(pos):\n self.buttons_clicked.append((item, mouse_button))\n item.on_click(mouse_button)",
"def ev_mousebuttondown(self, event: tcod.event.MouseButtonDown) -> T | None:",
"def OnMotion_Other(self, event):\r\n \r\n part = self.HitTest(*event.GetPosition())\r\n\r\n if part and part.type == AuiDockUIPart.typePaneButton \\\r\n and self.IsPaneButtonVisible(part):\r\n if part != self._hover_button:\r\n \r\n if self._hover_button:\r\n self.RefreshButton(self._hover_button)\r\n\r\n self._hover_button = part\r\n self.RefreshButton(part)\r\n \r\n else:\r\n \r\n if self._hover_button:\r\n self.RefreshButton(self._hover_button)\r\n else:\r\n event.Skip()\r\n\r\n self._hover_button = None",
"def _get_mouse_button_number(self, event):\n raise NotImplementedError",
"def update(self, surface: pygame.Surface, mouse_pos: Tuple[int, int], clicked: bool, *args: Any, **kwargs: Any) -> bool:\n\n executed = False\n button_color = self.button_disabled_color\n if self.enabled:\n if self.rect.collidepoint(mouse_pos):\n if clicked:\n self.function(*args, **kwargs)\n executed = True\n button_color = self.button_highlighted_color\n if pygame.mouse.get_pressed()[0]:\n button_color = self.button_pressed_color\n else:\n button_color = self.button_default_color\n if button_color != self.button_old_color:\n fade = self.button_color_fade.update()\n if fade == 1.0:\n draw_color = button_color\n self.button_old_color = button_color\n else:\n draw_color = (KDS.Math.Lerp(self.button_old_color[0], button_color[0], fade), KDS.Math.Lerp(self.button_old_color[1], button_color[1], fade), KDS.Math.Lerp(self.button_old_color[2], button_color[2], fade))\n else:\n draw_color = button_color\n pygame.draw.rect(surface, draw_color, self.rect)\n\n if self.overlay != None:\n surface.blit(self.overlay, (self.rect.center[0] - self.overlay.get_width() // 2, self.rect.center[1] - self.overlay.get_height() // 2))\n\n return executed",
"def mouse_button_state():\n x, y = c_int(0), c_int(0)\n bmask = mouse.SDL_GetMouseState(byref(x), byref(y))\n return ButtonState(bmask)",
"def get_button(self, button):\n return getattr(self.get_buttons_state(), button)"
] | [
"0.63936216",
"0.62173915",
"0.618309",
"0.6071253",
"0.60500014",
"0.5977573",
"0.583766",
"0.57638043",
"0.56980425",
"0.5674184",
"0.5642134",
"0.5620496",
"0.5582014",
"0.5577019",
"0.55757505",
"0.5544018",
"0.55341464",
"0.55198807",
"0.55091316",
"0.5495941",
"0.5458608",
"0.54495305",
"0.54495305",
"0.54490197",
"0.5448553",
"0.5433327",
"0.5415602",
"0.5382658",
"0.5381231",
"0.53718317"
] | 0.6484619 | 0 |
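
Editor's note: the row above captures a common pygame UI idiom in which update() refreshes the hover state every frame and only returns the button's action on a mouse-up inside its rect. A minimal runnable sketch of that pattern (the class name and action payload are illustrative):

import pygame

class UIElement(pygame.sprite.Sprite):
    def __init__(self, rect, action):
        super().__init__()
        self.rect = rect
        self.action = action
        self.mouse_over = False

    def update(self, mouse_pos, mouse_up):
        # Hover state is tracked every frame; the action is only reported
        # when the mouse button is released while hovering.
        if self.rect.collidepoint(mouse_pos):
            self.mouse_over = True
            if mouse_up:
                return self.action
        else:
            self.mouse_over = False
        return None

button = UIElement(pygame.Rect(10, 10, 100, 40), action="START")
print(button.update((50, 20), mouse_up=True))    # "START"
print(button.update((500, 500), mouse_up=True))  # None

Returning the action instead of executing it keeps the button decoupled from game logic, which is why the game_loop row later in this file polls update() for a ui_action value.
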
Draws element onto a surface | def draw(self, surface):
surface.blit(self.image, self.rect) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw(self, surface):\r\n if self.visible:\r\n surface.blit(self.image, (self.x, self.y))",
"def draw(self, surface):\n surface.blit(self.image, self.rect)",
"def draw(self, surface):\n surface.blit(self.image, self.rect)",
"def draw(self, surface):\n\n\t\tsurface.blit(self.image, self.rect.topleft)",
"def draw(self, surface):\n surface.blit(self.image, (0,0))\n for widget in self.widgets:\n widget.draw(surface)",
"def draw(self, screen):\n screen.blit(self.surface, self.rect)",
"def draw(self):\n\n surf = self.get_oxygen_surface()\n surf.set_alpha(255)\n self.screen.blit(surf, self.pos)",
"def draw(self):\n #for (x, y) in self.coords:\n # pyxel.rect(\n # (x + self.x) * 4,\n # (y + self.y) * 4,\n # (x + self.x) * 4 + 3,\n # (y + self.y) * 4 + 3,\n # self.color)",
"def draw():",
"def draw(self, surface):\n\t\tblit_position = self.position - Vector2(self.radius) # Subtracting vector to get top-left corner start\n\t\tsurface.blit(self.sprite, blit_position)",
"def draw(self, p):\r\n self.active = True\r\n surface = pygame.surfarray.make_surface(p)\r\n self.screen.blit(surface, (0, 0))\r\n pygame.display.flip()\r\n return",
"def draw(self, surface):\n #\n # Set the positions of all the items\n y = 0\n self.my_surface.fill((0, 0, 0, 0))\n for idx, item in enumerate(self):\n y += self.item_heights\n item.pos = (item.width / 2, y - self.initial_offset)\n #\n # Now draw item\n item.draw(self.my_surface)\n # #\n # # And the scrollbar\n # self.scroll_grab.pos = (self.my_surface.get_size()[0] - S['scrolled-scroll-width'], 0)\n # self.scroll_grab.draw(self.my_surface)\n #\n # Draw main surface\n if self.background:\n self.background.pos = self.pos\n self.background.draw(surface)\n self.region.pos = self.pos\n self.region.draw(surface)",
"def draw(self, surface):\n temp = pygame.Surface(self.renderer.pixel_size)\n self.renderer.render_map(temp)\n pygame.transform.smoothscale(temp, surface.get_size(), surface)",
"def draw(self, surface, force=False):\n if self.redraw or force:\n surface.blit(self.image, self.loc)\n self.redraw = False",
"def draw(self, surface):\n color = pygame.Color(255, 255, 255)\n pygame.draw.circle(surface, color, self.position, Molecule.radius, 2)",
"def draw(self):\n # static\n surf = self.surf.copy()\n\n # dynamic\n pos = (10+int((self.val-self.mini)/(self.maxi-self.mini)*130), 40)\n self.button_rect = self.button_surf.get_rect(center=pos)\n surf.blit(self.button_surf, self.button_rect)\n # move of button box to correct screen position\n self.button_rect.move_ip(self.xpos, self.ypos)\n\n # screen\n screen.blit(surf, (self.xpos, self.ypos))",
"def draw_me(self):\r\n\t\tself.image.fill((100, 200, 100))\r\n\t\tif self.active: pg.draw.rect(self.image, (100, 100, 200), self.frame, 3) #if active => draw frame around selected entity width 3\r\n\t\tself.display_surface.blit(self.image, self.rect)",
"def draw(self):",
"def draw(self):\n self.screen.blit(self.image, self.rect)",
"def draw(self, surface):\n surface.fill(self.white)\n\n self.gui_manager.draw_ui(surface)",
"def basic_render(self, surface) -> None:\n if not self.visible:\n return\n l, t = self.pos\n r, b = self.get_anchor_pos(Anchor.bottom_right)\n tpos = self.get_anchor_pos(Anchor.middle)\n backcolor = (128, 128, 128)\n forecolor = {False: (255, 255, 192), True: (255, 0, 0)}\n pts = ((l, t), (r, t), (r, b), (l, b))\n pygame.draw.polygon(surface, backcolor, pts, 0)\n pygame.draw.polygon(surface, forecolor[self.hover], pts, 1)\n BitmapFont.set_colors(BitmapFont.medium, backcolor, forecolor[self.hover])\n BitmapFont.render(surface, str(self.label), BitmapFont.medium, tpos, Anchor.middle)",
"def draw(self, surface):\n checked_color = (0, 196, 0) if self.checked else pg.Color(\"white\")\n surface.fill(pg.Color(\"black\"), self.rect)\n surface.fill(self.color, self.rect.inflate(-2,-2))\n surface.fill(pg.Color(\"white\"), self.rect.inflate(-6,-6))\n surface.fill((205,205,205), self.rect.inflate(-8,-8))\n surface.fill(checked_color, self.select_rect)",
"def draw(self, surface, camera=None):\n if camera:\n surface.blit(self.image, camera.apply(self.rect))\n else:\n surface.blit(self.image, self.rect)",
"def draw(self, screen):",
"def draw_piece(self):\n self.screen.blit(self.image, self.rect)",
"def draw(self):\n self.screen.fill((0,51,102))\n # get the new drawables\n self.drawables = (self.game_model.get_background_drawables()\n + self.game_model.get_plane_drawables()\n + self.game_model.get_bullet_drawables()\n + self.game_model.get_enemy_drawables())\n for d in self.drawables:\n rect = d.get_rect()\n surf = d.get_surface()\n surf.set_colorkey((255,255,255))\n self.screen.blit(surf, rect)",
"def draw(self,surface):\n surface.blit(self.image, self.rect)\n for moving in self.shots.values():\n moving.draw()",
"def draw(self, surface):\n\t\tangle = self.direction.angle_to(UP) # Translates spaceship's direction into rotation angle in degrees\n\t\trotated_surface = rotozoom(self.sprite, angle, 1.0) # Rotates the sprite. Last arg is scale change, hence 1.0\n\t\trotated_surface_size = Vector2(rotated_surface.get_size())\n\t\tblit_position = self.position - rotated_surface_size * 0.5 # Blit position calculated based on rotated surface size, which differs from original size\n\t\tsurface.blit(rotated_surface, blit_position)",
"def draw(self, surface):\n for molecule in self.molecules:\n molecule.draw(surface)",
"def draw(self, surface, offset=(0,0)):\n for button in self.buttons:\n button.draw(surface, offset)"
] | [
"0.8029322",
"0.7931684",
"0.7931684",
"0.7848772",
"0.77393",
"0.7583257",
"0.75462705",
"0.74494654",
"0.73914146",
"0.73739934",
"0.7322107",
"0.728858",
"0.71618783",
"0.7161633",
"0.7157818",
"0.71283376",
"0.7125859",
"0.7120794",
"0.7112475",
"0.7098524",
"0.7094169",
"0.7069122",
"0.7068451",
"0.7060888",
"0.70342",
"0.7024376",
"0.70225316",
"0.7006295",
"0.7003818",
"0.69911695"
] | 0.7984203 | 1 |
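
Editor's note: the one-line draw document above is the standard pygame blit pattern, where a sprite carries an image Surface and a rect and drawing is a single blit at that rect. A tiny sketch, rendered to an off-screen Surface so it runs without opening a window (names are illustrative):

import pygame

class Tile(pygame.sprite.Sprite):
    def __init__(self, color, pos):
        super().__init__()
        self.image = pygame.Surface((16, 16))
        self.image.fill(color)
        self.rect = self.image.get_rect(topleft=pos)

    def draw(self, surface):
        surface.blit(self.image, self.rect)

screen = pygame.Surface((64, 64))
Tile((255, 0, 0), (8, 8)).draw(screen)
print(screen.get_at((10, 10)))                  # (255, 0, 0, 255)

Sprite groups simply repeat this blit for every member, which is what buttons.draw(screen) does in the game_loop row that follows.
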
Handles game loop until an action is returned by a button in the buttons sprite renderer. | def game_loop(screen, buttons):
while True:
mouse_up = False
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONUP and event.button == 1:
mouse_up = True
screen.fill(BLACK)
for button in buttons:
ui_action = button.update(pygame.mouse.get_pos(), mouse_up)
if ui_action is not None:
return ui_action
screen.blit(img, imgrect)
buttons.draw(screen)
pygame.display.flip() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def callback_game_loop(self) -> None:\n self._goal_generate()\n self._update()\n self.reset()\n\n while self._player != self._goal:\n self._update()\n action = self._action_callback(\n self._player.np,\n self._goal.np,\n *self._action_callback_args,\n )\n if action == \"QUIT\":\n break\n self._player_erase()\n self.FUNCMAP[action]()\n self._update()\n\n if self._display:\n time.sleep(0.1)\n try:\n if chr(cv2.waitKey(5)) in self.KEYMAP[\"QUIT\"]:\n break\n except ValueError:\n pass\n\n if self._display:\n print(f\"Steps taken: {self._routes[self._current_route_key]}\")\n\n if self._display:\n cv2.waitKey(0)",
"def wait_for_start(self):\n while True:\n ev = self.scene.waitfor('click')\n game_type = self.on_click(ev)\n if game_type:\n return game_type",
"def game_loop(self):\n self.interface.game_loop(self)",
"def start_menu():\r\n score = 0\r\n\r\n while True:\r\n pg.display.set_caption(\"'METEOR BLASTER - 'The Dark Souls of Arcade Games'\")\r\n pg.key.set_repeat(1, 1)\r\n\r\n bg_image = pg.image.load(\"images/meteor shower load game.png\").convert()\r\n bg_image = pg.transform.scale(bg_image, screen.get_size())\r\n screen.blit((bg_image), (0, 0))\r\n\r\n # defines all the buttons on the screen\r\n play_button = sprites.button(\r\n s.play_pos, (200, 50), \"images/play_button.png\", \"images/hl_play_button.png\"\r\n )\r\n one_player_button = sprites.button(\r\n s.one_player_pos,\r\n (130, 50),\r\n \"images/one_player_button.png\",\r\n \"images/hl_op_button.png\",\r\n )\r\n two_player_button = sprites.button(\r\n s.two_player_pos,\r\n (130, 50),\r\n \"images/two_player_button.png\",\r\n \"images/hl_tp_button.png\",\r\n )\r\n load_game_button = sprites.button(\r\n s.load_game_pos,\r\n (130, 50),\r\n \"images/load_game_button.png\",\r\n \"images/hl_lg_button.png\",\r\n )\r\n\r\n # group to hold all the buttons\r\n button_group = pg.sprite.Group()\r\n button_group.add(\r\n play_button, one_player_button, two_player_button, load_game_button\r\n )\r\n\r\n pg.display.update()\r\n end = False\r\n game_loaded = False\r\n\r\n # loops until a mode is selected\r\n while not (end):\r\n\r\n # highlights buttons when the mouse hovers over them\r\n for button in button_group:\r\n button.highlight()\r\n\r\n for event in pg.event.get():\r\n\r\n # if player clicks\r\n if event.type == pg.MOUSEBUTTONDOWN:\r\n x, y = pg.mouse.get_pos()\r\n\r\n # if one player mode is clicked\r\n if one_player_button.rect.collidepoint(x, y):\r\n s.two_player = False\r\n one_player_button.highlight()\r\n end = True\r\n\r\n # if two player mode is clicked\r\n if two_player_button.rect.collidepoint(x, y):\r\n s.two_player = True\r\n two_player_button.highlight()\r\n end = True\r\n\r\n # if load game button is clicked\r\n if load_game_button.rect.collidepoint(x, y):\r\n helpers.enter_name()\r\n score = helpers.load_game()\r\n end = True\r\n game_loaded = True\r\n\r\n pg.display.update()\r\n\r\n # loops until play button is clicked and the game begins\r\n while end:\r\n if game_loaded:\r\n end = False\r\n for event in pg.event.get():\r\n if event.type == pg.MOUSEBUTTONDOWN:\r\n x, y = pg.mouse.get_pos()\r\n if play_button.rect.collidepoint(x, y):\r\n end = False\r\n\r\n return score",
"def control(self):\n while not (self.game_over() or self.quit):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.quit = True\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_r:\n self.play()\n elif event.key == pygame.K_m:\n self.__init__()\n elif event.key == pygame.K_LEFT and len(self.sequence)>=2:\n self.sequence.pop()\n self.board = self.sequence.pop()\n self.draw()\n elif event.key == pygame.K_1:\n self.tip(1)\n elif event.key == pygame.K_2:\n self.tip(2)\n elif event.key == pygame.K_3:\n self.tip(3)\n elif event.key == pygame.K_4:\n self.tip(4)\n elif event.key == pygame.K_5:\n self.tip(5)\n \n elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n ## if mouse is pressed get position of cursor ##\n pos = pygame.mouse.get_pos()\n ## check if cursor is on button ##\n for i in range(len(self.buttons)):\n for j in range(len(self.buttons[i])):\n if self.buttons[i][j].collidepoint(pos):\n if self.selected == None:\n self.selected = [i,j]\n elif self.selected == [i,j]:\n self.selected = None\n elif self.board[self.selected[0]][self.selected[1]]==0:\n self.selected = [i,j]\n else:\n if self.move(i,j):\n self.selected = None\n self.draw()\n return True\n else:\n self.selected = None\n self.draw()\n return False\n self.draw()\n return False",
"def breakout_loop(self):\n while self.playing:\n self.handle_events()\n self.update()\n if self.game_over:\n self.current_menu = self.fail_menu\n self.playing = False\n self.reset()\n self.draw()",
"def main_loop(self):\n while self.game_manager.game_state != GameState.Quit:\n\n self.handle_events()\n self.handle_ui_response()\n #in menu\n if self.game_manager.game_state == GameState.Menu: \n self.display.clear()\n\n #in game\n elif self.game_manager.game_state == GameState.Running:\n self.game_manager.move_players()\n\n #after game\n elif self.game_manager.game_state == GameState.Finished:\n if self.game_manager.winner == None:\n self.game_manager.player1.decay()\n self.game_manager.player2.decay() \n else:\n self.game_manager.loser.decay()\n self.game_manager.loser.draw()\n\n #perform game manager actions\n self.game_manager.act()\n #do all the rendering stuff\n self.render_scene()\n #control FPS\n self.clock.tick(self.FPS)",
"def run_next(self, action):\r\n self.screen.fill((0, 0, 0))\r\n\r\n # Run the simulation loop\r\n self.SimulationLoop(action)\r\n if GUIEnabled and self.settings.drawMenu:\r\n self.gui_app.paint(self.screen)\r\n\r\n pygame.display.flip()\r\n self.clock.tick(self.settings.hz)\r\n self.fps = self.clock.get_fps()",
"def wait_for_buttons(self, threaded=True):\n\t\tRPIO.wait_for_interrupts(threaded)",
"def run_game(self):\n while True:\n self._check_event()\n self._update_screen()",
"def buttonEventCallback(argument):\n global buttonCanBePressed\n if buttonCanBePressed is True:\n ledpedbutton.value(1)\n buttonCanBePressed = False\n start_new_thread(is_timer_4, tuple('0'))",
"def Gameloop():",
"def GAME_LOOP():\n pass",
"def player_loop(self):\n\n while True:\n # send message to game that you are ready\n msg = self.receiver()\n if msg[\"game_over\"]:\n return",
"def run(self):\n while True:\n if self.game_over: \n return \n\n self.handle_events() \n if self.paused:\n continue\n\n self.update_generation()\n self.draw_grid()\n\n self.cap_frame_rate()",
"def check_replay_button(self, mouse_x, mouse_y):\r\n for button in self._replay_button_list:\r\n if button.get_button_rect().collidepoint(mouse_x, mouse_y):\r\n button_clicked = button\r\n break\r\n else:\r\n button_clicked = None\r\n\r\n if button_clicked is not None and button_clicked.get_num_atom() == 1:\r\n self.setup_new_game()\r\n elif button_clicked is not None and button_clicked.get_num_atom() == 2:\r\n sys.exit()",
"def GAMEOVER_LOOP():\n pass",
"def main_loop(self) -> None:\n while True:\n player = self._players[self._current_player]\n hit = True\n while hit:\n self.select_square(player)\n if self.menu_called: # go to menu\n self.menu_called = False\n return\n hit = player.shoot()\n if player.has_won():\n self.display_manager.display_end_game_message(player)\n self.game_over = True\n return\n self._current_player = (self._current_player + 1) % len(self._players)",
"def handle_left_click(self):\n if not self.game_in_progress:\n return\n if self.first_click:\n self.first_click = False\n self.timer.start(1000)\n sender = self.sender()\n row = 0\n col = 0\n for row in range(self.rows):\n for col in range(self.cols):\n if self.button_array[row][col] == sender:\n break\n else:\n continue\n break\n # print 'Received left click:', row, ',', col\n celllist = self.board.opencell(row, col)\n if celllist == []:\n return\n for cell in celllist:\n row = cell[0]\n col = cell[1]\n cell_property = self.board.getcellproperty(row, col)\n if cell_property == CellProperty.Empty:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/OpenedSquare.png\"))\n elif cell_property == CellProperty.Mine:\n # Game over\n for row in range(self.rows):\n for col in range(self.cols):\n cell_property = self.board.getcellproperty(row, col)\n if cell_property == CellProperty.Mine:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/mine.ico\"))\n self.status_button.setIcon(QtGui.QIcon(\"icons/smiley3.ico\"))\n self.game_in_progress = False\n self.timer.stop()\n return\n elif cell_property == CellProperty.MineCountOne:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/1.png\"))\n elif cell_property == CellProperty.MineCountTwo:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/2.png\"))\n elif cell_property == CellProperty.MineCountThree:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/3.png\"))\n elif cell_property == CellProperty.MineCountFour:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/4.png\"))\n elif cell_property == CellProperty.MineCountFive:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/5.png\"))\n elif cell_property == CellProperty.MineCountSix:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/6.png\"))\n elif cell_property == CellProperty.MineCountSeven:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/7.png\"))\n elif cell_property == CellProperty.MineCountEight:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/8.png\"))\n\n game_status = self.board.continuegame()\n print 'Game Status:', game_status\n if game_status == GameStatus.GameWon:\n self.timer.stop()\n self.game_in_progress = False\n player_name = QtGui.QInputDialog.getText(self, \"Name Please !!\",\\\n \"Enter your name for leader board:\")\n # TODO: Replace 1 with the time taken by the end user.\n LeaderBoard.insertnewscore(CURRENT_GAME_LEVEL, player_name[0], self.time)\n self.status_button.setIcon(QtGui.QIcon(\"icons/smiley.ico\"))\n print \"You have won the game\"",
"def play_cpu(self):\n \n # Play button sound\n self.button_sound()\n\n while True:\n\n # If turns is 9, then all the places on the board are filled. Hence Cpu doesn't get a turn. \n if self.turn >= 9:\n break\n\n # Choose a random position and if that position on board is empty, then place a 'O' there.\n i = random.randint(0, 8)\n if self.board[i] == 0:\n #root.after(400)\n self.button_list[i].config(image=self.O_img)\n self.board[i] = -1\n self.turn += 1\n\n break",
"def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n self.sleep_time = 0\n return\n\n if event.type == pygame.MOUSEBUTTONUP:\n pos = pygame.mouse.get_pos()\n\n if self.button.collidepoint(pos):\n if self.state == \"solving\":\n self.state = \"stopping\"\n\n if self.state == \"solved\":\n self.state = \"waiting\"\n self.puzzle_state = \"solving\"\n self.button_text = \"Solve!\"\n self.board = self.original_board.copy()\n\n elif self.state == \"waiting\":\n self.state = \"solving\"\n self.button_text = \"Stop!\"\n self.button_color = BUTTON_COLOR_STOP\n\n isSolved = self.solve()\n\n self.button_color = BUTTON_COLOR_SOLVE\n if isSolved:\n self.state = \"solved\"\n self.button_text = \"Clear\"\n self.puzzle_state = \"solved\"\n else:\n if self.state == \"stopping\":\n self.state = \"waiting\"\n self.button_text = \"Solve!\"\n self.puzzle_state = \"solving\"\n else:\n self.state = \"solved\"\n self.button_text = \"Clear\"\n self.puzzle_state = \"failed\"",
"def handle_button(self, button):\n last_run = self.last_seen[button] if button in self.last_seen else 0\n diff = time.time() - last_run\n\n if diff <= 1:\n logging.warning(\"duplicate: %s, %d, %d\", button, last_run, diff)\n return\n\n try:\n cmd = buttons.COMMANDS[button]\n except KeyError:\n logging.warning(\"No instructions found for button %s.\", button)\n return\n\n self.last_seen[button] = time.time()\n\n try:\n function, music, zone = cmd\n except ValueError, ex:\n logging.warning(\"Couldn't parse instructions from %s: %s\", cmd, ex)\n return\n\n device = self.player.zone(zone)\n if not device:\n logging.warning(\"Can't find a device called %s\", zone)\n return\n\n # If this is the same button we saw last, pause or unpause it.\n if button == self.last_button:\n device.toggle()\n return\n\n if function == \"play_local\":\n self.play_local(music, device)\n self.last_button = button\n else:\n logging.warning(\"Don't know how to %s.\", cmd)",
"def action_handler(self):\n if self.state == data.DEAD:\n return\n\n x = 0\n for check in self.state_chart[self.state]:\n if not check:\n x += 1\n continue\n elif check():\n self.state = x\n\n # Some messages when state changes\n if self.state == data.CHASE:\n self.handler.message_box.add_msg(\"{} sees you!\".format(self.name), \n data.COLOURS['mob_behaviour_text'])\n elif self.state == data.RUN:\n self.handler.message_box.add_msg(\"{} runs away!\".format(self.name), \n data.COLOURS['mob_behaviour_text'])\n\n x += 1\n\n if self.state == data.HOLD:\n return\n elif self.state == data.CHASE:\n self.chase(self.handler.player)\n elif self.state == data.RUN:\n self.run(self.handler.player)",
"def update(self):\n self.game.check_inputs()\n\n if self.game.actions[pygame.K_RETURN]:\n self.game.restart()",
"def wait_keydown(self):\n while True:\n self.clock.tick(self.fps)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n return\n if event.type == pygame.KEYDOWN:\n return",
"def handle_next_action(self, next_action):\n if next_action == 'retry':\n self.reset()\n\n elif next_action == 'quit':\n exit_app()\n\n elif next_action == 'back':\n self.__running = False\n return True\n\n return False",
"def next(self):\n \n jump = 0\n \n for event in pudding.process_event():\n if event[0] == sdlconst.KEYDOWN:\n if (event[1] == sdlconst.K_q) or (event[1] == sdlconst.K_ESCAPE):\n tofu.GAME_INTERFACE.end_game() # Quit the game\n \n elif event[1] == sdlconst.K_m:\n print \"trying to change single to multiplayer mode\"\n tofu.GAME_INTERFACE.end_game('client')\n \n elif event[1] == sdlconst.K_LSHIFT:\n # Shift key is for jumping\n # Contrary to other action, jump is only performed once, at the beginning of\n # the jump.\n jump = 1\n \n elif event[1] == sdlconst.K_LEFT: self.left_key_down = 1\n elif event[1] == sdlconst.K_RIGHT: self.right_key_down = 1\n elif event[1] == sdlconst.K_UP: self.up_key_down = 1\n elif event[1] == sdlconst.K_DOWN: self.down_key_down = 1\n \n elif event[0] == sdlconst.KEYUP:\n if event[1] == sdlconst.K_LEFT: self.left_key_down = 0\n elif event[1] == sdlconst.K_RIGHT: self.right_key_down = 0\n elif event[1] == sdlconst.K_UP: self.up_key_down = 0\n elif event[1] == sdlconst.K_DOWN: self.down_key_down = 0\n \n if jump: return Action(ACTION_JUMP)\n \n # People saying that Python doesn't have switch/select case are wrong...\n # Remember this if you are coding a fighting game !\n return Action({\n (0, 0, 1, 0) : ACTION_ADVANCE,\n (1, 0, 1, 0) : ACTION_ADVANCE_LEFT,\n (0, 1, 1, 0) : ACTION_ADVANCE_RIGHT,\n (1, 0, 0, 0) : ACTION_TURN_LEFT,\n (0, 1, 0, 0) : ACTION_TURN_RIGHT,\n (0, 0, 0, 1) : ACTION_GO_BACK,\n (1, 0, 0, 1) : ACTION_GO_BACK_LEFT,\n (0, 1, 0, 1) : ACTION_GO_BACK_RIGHT,\n }.get((self.left_key_down, self.right_key_down, self.up_key_down, self.down_key_down), ACTION_WAIT))",
"def example(self):\n while self.check_end() == False:\n plt.pause(0.25)\n end = self.update_board(random.choice(self.get_actions()), True)",
"def on_click(self):\n arcade.play_sound(button, volume=constants.MUSIC_VOLUME / 40)\n\n global success\n global fails\n if success or fails == 20:\n reset_global_variables()\n self.minigame.window.show_view(self.minigame.main_view)\n else:\n self.minigame.window.show_view(self.minigame.main_view)\n print(f\"Exit Button.\")",
"def run(self):\r\n ##boucle appellant render() 30fois par seconde\r\n r = 0\r\n while r == 0:\r\n r = self.update()\r\n self.render()\r\n time.sleep(1/30)\r\n return r"
] | [
"0.65111536",
"0.6462207",
"0.6459699",
"0.6442829",
"0.6415537",
"0.64030033",
"0.63696444",
"0.635908",
"0.6352003",
"0.63030666",
"0.62873745",
"0.6223671",
"0.62234336",
"0.61902213",
"0.61875373",
"0.6147553",
"0.61422455",
"0.61360407",
"0.6135536",
"0.6128289",
"0.6121313",
"0.6115664",
"0.61136466",
"0.61133724",
"0.6096315",
"0.60932153",
"0.60835063",
"0.6082284",
"0.60783064",
"0.60657763"
] | 0.7331514 | 0 |
handle a keypress
space  > take a screen shot
tab    > start/stop recording a screencast
escape > quit | def onKeypress(self, keycode):
# space
if keycode == 32:
self._captureManager.writeImage('screenshot.png')
# tab
elif keycode == 9:
if not self._captureManager.isWritingVideo:
self._captureManager.startWritingVideo('screencast.avi')
else:
self._captureManager.stopWritingVideo()
# escape
elif keycode == 27:
self._windowManager.destroyWindow() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def keypress(self, key): # pragma: no cover\n if key == \"s\":\n self.screenshot()\n\n elif key == \"q\" or key == \"Esc\":\n self.close()\n\n elif key == \"c\":\n self._print_camera()",
"def on_key_press(symbol, modifiers):\n\n if symbol == key.BACKSPACE or symbol == key.SLASH:\n print('RESET')\n env.reset()\n env.render()\n elif symbol == key.PAGEUP:\n env.unwrapped.cam_angle[0] = 0\n elif symbol == key.ESCAPE:\n env.close()\n sys.exit(0)\n\n # Take a screenshot\n # UNCOMMENT IF NEEDED - Skimage dependency\n # elif symbol == key.RETURN:\n # print('saving screenshot')\n # img = env.render('rgb_array')\n # save_img('screenshot.png', img)",
"def execute_pause(self):\n print(self.empty_lines + self.double_tab + \"to continue press any key..\\r\")\n self.getch()",
"def start_record(cr):\r\n \"\"\"Emulate the keyboard \"\"\"\r\n _player = input_playback.InputPlayback()\r\n _player.emulate(input_type='keyboard')\r\n _player.find_connected_inputs()\r\n \"\"\"To get list of UI elements\"\"\"\r\n ui = ui_utils.UI_Handler()\r\n ui.start_ui_root(cr)\r\n list=ui.get_name_role_list()\r\n \"\"\"To Open status tray and click on Screen Recording option\"\"\"\r\n logging.info(\"Opening status tray\")\r\n ui.doDefault_on_obj(STATUS_TRAY_REGEXP, True, role='button')\r\n time.sleep(WAIT)\r\n ui.doDefault_on_obj('/Close/i', True, role='button')\r\n ui.doDefault_on_obj('/Screen capture/i', True, role='button')\r\n ui.doDefault_on_obj('/Screen record/i', True,role='toggleButton')\r\n ui.doDefault_on_obj('/Record full screen/i', True,role='toggleButton')\r\n _player.blocking_playback_of_default_file(input_type='keyboard', filename='keyboard_enter')\r\n \"\"\"To open Chrome Page\"\"\"\r\n _player.blocking_playback_of_default_file(input_type='keyboard', filename='keyboard_ctrl+t')\r\n time.sleep(WAIT)\r\n logging.info(\"Recording Started\")\r\n return ui",
"def on_key_press(symbol, modifiers):\n if symbol == key.SPACE:\n world.next_step()",
"def __time_key_release_event(self, event):\n\t\tif event.key() == QtCore.Qt.Key_Space:\n\t\t\tself._player.stop() if self._player.is_playing else _player._video.play()",
"def capture_key_press(self, event):\n\n file_object = open(log,'a')\n file_object.write(event.Key)\n file_object.write('\\n')\n \n if event.Ascii==94:\n file_object.close()\n self.listener.cancel()",
"def keyCam(self, event):\n dct = {\n \"d\": 0,\n \"s\": 1,\n \"q\": 2,\n \"z\": 3\n }[event.char]\n self.moveAllSeg(dct)",
"def key_press(self):\n self.screen.nodelay(True)\n return self.screen.getch()",
"def _D(stdscr):\n curses.nocbreak()\n stdscr.keypad(0)\n curses.echo()\n curses.endwin()\n import pdb; pdb.set_trace()",
"def keyPressEvent(self, event):\n if type(event) == QtGui.QKeyEvent:\n if event.key() == Qt.Key_S:\n if self.swap_button.isEnabled():\n self.swap_cap_live()\n\n if event.key() == Qt.Key_C:\n self.capture()\n\n if event.key() == Qt.Key_R:\n if self.rec_seq_button.isEnabled():\n self.record_sequence()\n\n if event.key() == Qt.Key_T:\n self.toggle_exposure()\n\n if event.key() == Qt.Key_Delete:\n \"\"\"\n Look for activated ROI and delete it.\n \"\"\"\n self.live_screen.delete_activated()\n\n if event.key() == Qt.Key_A:\n self.live_screen.delete_features()",
"def _on_key_press(self, key):\n if key is self.TRIGGER_KEY and not self.do_record:\n print(\"Start Recording...\")\n self.do_record = True",
"def parse_keypress(self, wid, event):\n\n keyname = Gdk.keyval_name(event.keyval)\n if keyname == \"Control_R\": # Key for query\n self.get_output()\n elif keyname == \"Page_Up\": # Goes to previous query\n tot = len(self.history)\n if -(self.prompt_cursor) != tot:\n self.prompt_cursor -= 1\n text = self.history[self.prompt_cursor]\n self.current_prompt.set_text(text)\n\n elif keyname == \"Page_Down\": # Drops to next query\n if (self.prompt_cursor) != -1:\n self.prompt_cursor += 1\n text = self.history[self.prompt_cursor]\n self.current_prompt.set_text(text)",
"def break_stimulus(win,break_stim):\n #start core clock\n clock = core.Clock()\n\n #while space bar is not pressed continue to show break stimulus\n #if 50 seconds pass, then quit experiment\n break_stim.setAutoDraw(True)\n while not event.getKeys(['space']):\n win.flip()\n if int(clock.getTime()) > 50:\n core.quit\n break_stim.setAutoDraw(False)",
"def run(self):\n while inputs.check_for_key_press() == None:\n graphics.clear_display_surf()\n graphics.get_display_surf().blit(self.title_pause_surf, self.title_pause_rect)\n graphics.update_display_surf() \n self.fps_clock.tick(4)\n inputs.clear_event_queue()",
"def event(self,events):\n for event in events:\n if event.type == KEYDOWN:\n if event.key == K_RETURN:#starts the game\n self.game.gotoMain()\n #print \"r\"\n if event.key == K_ESCAPE:#quits the game\n sys.exit(0)",
"def _on_key_release(self, event):",
"def _on_key_press(self, event):",
"def pause_handler(term):\n inp = None\n while inp not in (\"p\", \"P\", \"q\", \"Q\"):\n print(term.home + term.clear + term.move_y(term.height // 2))\n print(term.black_on_white(term.center(\"press P to continue.\")))\n\n inp = term.inkey(timeout=10)",
"def vim():\n keepgoing = True\n textmode = False\n tmpstr = ''\n mm = 0\n while keepgoing:\n input = click.termui.getchar()\n if textmode:\n if 13 == ord(input):\n # hit enter. send it then reset us\n roku_master.literal(tmpstr)\n tmpstr = ''\n textmode = False\n click.echo('')\n else:\n click.echo(input, nl=False)\n tmpstr += input\n elif 'q' == input:\n keepgoing = False\n elif 'b' == input:\n mm = do_x('back')\n elif 'j' == input:\n mm = do_x('down', mm)\n elif 'k' == input:\n mm = do_x('up', mm)\n elif 'h' == input:\n mm = do_x('left', mm)\n elif 'l' == input:\n mm = do_x('right', mm)\n elif 'f' == input:\n mm = do_x('forward', mm)\n elif 'r' == input:\n mm = do_x('reverse', mm)\n elif 'p' == input:\n mm = do_x('play', 1)\n elif 13 == ord(input): # enter\n mm = do_x('select')\n elif input in '123456789':\n mm = int(input)\n elif '*' == input:\n mm = do_x('info', 1)\n elif 'i' == input:\n textmode = True",
"def end_stimulus(win,end_stim):\n #start core clock\n clock = core.Clock()\n\n #while space bar is not pressed continue to show end stimulus\n #if 50 seconds pass, then stop showing end stimulus\n end_stim.setAutoDraw(True)\n while not event.getKeys(['space']):\n win.flip()\n if int(clock.getTime()) > 50:\n break\n end_stim.setAutoDraw(False)",
"def qpressed(): #QUITTNG FUNCTION\n #print(\"q pressed\")\n sys.exit()",
"def onKeyPress(self):\n ch = read(fd, 4)\n if ch == '\\033': # escape\n self.pause()\n elif '\\033' in ch:\n return\n elif '\\t' in ch: # tab\n return\n elif len(self.user_input) >= 80: # too long\n self.user_input[:80]\n return\n elif ch == '\\r': # return\n if self.user_input == \"\":\n return\n command = command_list.match(self.user_input)\n if not command:\n pass\n elif command.group(1):\n self._save(0)\n elif command.group(2):\n self._save()\n elif command.group(3):\n self._save(command.group(4))\n link = self.links.match(self.user_input.lower())\n if link:\n self.reset(link.group(0))\n self.user_input = \"\"\n self.locked += 1\n print '\\033[0m'\n print_loc(' '*80, self.y+5, self.x+2)\n #print_loc(' '*80, self.y+6, 0)\n self.locked -= 1\n elif ch == '\\x7f': # backspace\n if self.user_input == \"\":\n return\n self.user_input = self.user_input[:-1]\n elif ch == ' ': # space\n if self.user_input == \"\":\n return\n elif self.user_input[-1] == ' ':\n return\n self.user_input += ' '\n else: # all else\n self.user_input += ch\n self.locked += 1\n # Highlight valid user input\n if self.links.match(self.user_input.lower()):\n print '\\033[0;96;4m'\n print_loc(self.user_input+'\\033[0;1m < \\033[0m ', self.y + 5, self.x)\n elif command_list.match(self.user_input):\n print '\\033[0;1;92m'\n print_loc(self.user_input+'\\033[0;1m < \\033[0m ', self.y + 5, self.x)\n else:\n print '\\033[0m'\n # Display new user input line\n print_loc(self.user_input+'\\033[0;7m \\033[0m ', self.y + 5, self.x)\n self.locked -= 1",
"def perform_keyboard_actions(self):\n self.handle_keyboard_input()\n self.grid.next_frame()",
"def on_press(self, keyname):\n if self.keydown:\n return\n try:\n self.keydown = True\n keyname = str(keyname).strip('\\'')\n log.info('KEY PRESS ' + keyname)\n if keyname == 'Key.esc':\n self.toggle_tracking(False)\n # self.tracking = False\n self.drone.land()\n self.drone.quit()\n\n \n cv2.destroyAllWindows() \n os._exit(0)\n \n if keyname in self.controls_keypress:\n self.controls_keypress[keyname]()\n except AttributeError:\n log.debug(f'special key {keyname} pressed')",
"def on_key_press(self, key, _modifiers): \n if key == arcade.key.ESCAPE: # resume game\n self.window.show_view(self.instruction_view)",
"def run(self): \n while inputs.check_for_key_press() == None:\n graphics.clear_display_surf()\n graphics.get_display_surf().blit(self.title_over_surf, self.title_over_rect)\n graphics.update_display_surf()\n self.fps_clock.tick(4)\n inputs.clear_event_queue()",
"def on_key(self, _win, key, _scancode, action, _mods):\n if action == glfw.PRESS or action == glfw.REPEAT:\n if key == glfw.KEY_ESCAPE or key == glfw.KEY_Q:\n glfw.set_window_should_close(self.win, True)\n if key == glfw.KEY_W:\n GL.glPolygonMode(GL.GL_FRONT_AND_BACK, next(self.fill_modes))\n if key == glfw.KEY_R:\n os.system(\"pkill aplay\")\n os.system(\"aplay T-Rex.wav &\")\n glfw.set_time(0)\n if key == glfw.KEY_N:\n self.normal_mapping = 1 - self.normal_mapping",
"def handleKeyboardInterupt():\n System.stopExecution(TERMINATED_BY_USER)",
"def run(self):\n global key\n getch = _GetchUnix()\n key = getch()\n while key != \"e\":\n key = getch()\n #time.sleep(0.1)"
] | [
"0.6659243",
"0.6474064",
"0.6172918",
"0.6085636",
"0.60522807",
"0.60106236",
"0.59625655",
"0.59214485",
"0.5895227",
"0.5893601",
"0.5828445",
"0.57692516",
"0.5768595",
"0.5760644",
"0.5697636",
"0.5668367",
"0.5653859",
"0.56379724",
"0.5637277",
"0.56291217",
"0.56273264",
"0.56140304",
"0.56040114",
"0.55802184",
"0.55746084",
"0.55717206",
"0.5556608",
"0.555497",
"0.5544644",
"0.5528545"
] | 0.7867244 | 0 |
Convert octal string to binary string. | def oct2bin(x):
return bin(int(x, 8))[2:] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_binary_string_to_octal_string():\n obj = pmisc.binary_string_to_octal_string\n if sys.hexversion < 0x03000000:\n ref = (\n \"\\\\1\\\\0\\\\2\\\\0\\\\3\\\\0\\\\4\\\\0\\\\5\\\\0\\\\6\\\\0\\\\a\\\\0\"\n \"\\\\b\\\\0\\\\t\\\\0\\\\n\\\\0\\\\v\\\\0\\\\f\\\\0\\\\r\\\\0\\\\16\\\\0\"\n )\n actual = obj(\"\".join([struct.pack(\"h\", num) for num in range(1, 15)]))\n assert ref == actual\n else:\n ref = r\"\\o1\\0\\o2\\0\\o3\\0\\o4\\0\\o5\\0\\o6\\0\\a\\0\" r\"\\b\\0\\t\\0\\n\\0\\v\\0\\f\\0\\r\\0\\o16\\0\"\n code = lambda x: struct.pack(\"h\", x).decode(\"ascii\")\n actual = obj(\"\".join([code(num) for num in range(1, 15)]))\n assert ref == actual",
"def _str_to_binary_string(string: str) -> str:\n binary_string = \"\"\n for char in string:\n ascii_code = ord(char)\n binary_string += format(ascii_code, \"08b\")\n\n if binary_string:\n return binary_string\n else:\n raise ValueError(\"Error converting message to binary\")",
"def bin2oct(x):\n return oct(int(x, 2))[_oct_index:]",
"def to_bin(string, delimiter=None):\n if delimiter == None:\n return ''.join(format(ord(char), '08b') for char in string)\n elif delimiter == 'space':\n return ' '.join(format(ord(char), '08b') for char in string)",
"def convert_text_to_binary(plaintext):\n\t\n\tnums = convert_to_num(plaintext)\n\tnums = [format(num, '08b') for num in nums]\n\tbinary_string = ''.join(nums)\n\treturn binary_string",
"def get_binary(string):\r\n # Use special logic for NULL_STRING to avoid errors\r\n if string == NULL_STRING:\r\n return \"00000000\"\r\n # Otherwise, gives the binary representation of UTF-8 characters\r\n return \"\".join(\"{:08b}\".format(d) for d in bytearray(string, \"utf-8\"))",
"def to_octal(number: int) -> str:\n return oct(number).replace(\"0o\", \"\")",
"def ascii_to_binary(string):\r\n\tbin_string = \"\"\r\n\tfor i in range(0,len(string)):\r\n\t\tbin_string += conversions.decimal_to_binary(search(alphabet, string[i])+32)\r\n\treturn bin_string",
"def translate_to_binary(command):\r\n int_command = int(command)\r\n binary_command = bin(int_command)[2:]\r\n missing_bits = CMD_LEN - len(binary_command)\r\n cmd_prefix = missing_bits * str(0)\r\n binary_command = str(cmd_prefix) + str(binary_command)\r\n return binary_command + \"\\n\"",
"def to_binary_string(x):\n return \"{0:b}\".format(x)",
"def binstr(x):\n xs = binary_repr(x)\n\n outstr = xs;\n for i in range(8 - len(xs)):\n outstr = '0' + outstr\n return outstr",
"def int2bin(n: int) -> str:",
"def decimal2binary(self, n):\n\n octet = [\"0\", \"0\", \"0\", \"0\", \"0\", \"0\", \"0\", \"0\"]\n index = 0\n if n < 0 or n > 255:\n raise ValueError, \"Octet value must be between [0-255]\"\n if n == 0: \n return \"\".join(octet)\n while n > 0:\n octet[index] = str((n % 2))\n index += 1\n n = n >> 1\n octet.reverse()\n return \"\".join(octet)",
"def str_to_bin(string):\n ret = list(string)\n # convert to binary representation\n ret = ['{:07b}'.format(ord(x)) for x in ret]\n # split the binary into\n ret = [[bit for bit in x] for x in ret]\n # flatten it and convert to integers\n ret = [int(bit) for sublist in ret for bit in sublist]\n return ret",
"def ascii_to_bit(ascii_string: str) -> str:\n result = bin(int.from_bytes(ascii_string.encode(), 'big'))\n result = result[2:] # We don't want this '0b' at the beginning.\n while len(result) % 8 != 0:\n result = '0' + result\n return result",
"def dec2oct(x):\n return oct(x)[_oct_index:]",
"def convert_to_binary(num):\n return '{0:b}'.format(num)",
"def hex2oct(x):\n return oct(int(x, 16))[_oct_index:]",
"def repr_as_binary(value):\n b = bin(value)[2:]\n return b.zfill(len(b) + -len(b) % 8)",
"def get_string_binary(string):\r\n string_binary_array = []\r\n\r\n # Create array of binaries from the string\r\n for character in string:\r\n string_binary_array.append(get_binary(character))\r\n\r\n # Combine those binaries into one long binary\r\n string_binary = \"\".join(string_binary_array)\r\n\r\n return string_binary",
"def de_octal(msg):\n try:\n msglist = msg.split(' ')\n characters = []\n for octal in msglist:\n n = int(octal, base=8)\n characters.append(n)\n d_msg = ''\n for c in characters:\n d_msg += ''.join(chr(c))\n return d_msg\n except ValueError:\n print('Invalid octal-encoded message')",
"def hex2oct(x):\n # moreZero = random.choice(range(10))\n moreZero = 0\n return oct(int(x, 16)).zfill(moreZero + len(oct(int(x, 16)))).strip('L')",
"async def binstr(self, ctx, *, input_binary = None):\n\t\tif input_binary == None:\n\t\t\tawait ctx.send(\"Usage: `{}binstr [input_binary]`\".format(ctx.prefix))\n\t\t\treturn\n\t\t# Clean the string\n\t\tnew_bin = \"\"\n\t\tfor char in input_binary:\n\t\t\tif char is \"0\" or char is \"1\":\n\t\t\t\tnew_bin += char\n\t\tif not len(new_bin):\n\t\t\tawait ctx.send(\"Usage: `{}binstr [input_binary]`\".format(ctx.prefix))\n\t\t\treturn\n\t\tmsg = ''.join(chr(int(new_bin[i:i+8], 2)) for i in range(0, len(new_bin), 8))\n\t\tawait ctx.send(self.suppressed(ctx.guild, msg))",
"def get_string(binary):\r\n new_string = \"\"\r\n\r\n # Sets range as length of binary string and returns an int\r\n for x in range((len(binary) // 8)):\r\n # Grabs 8 characters at a time, converts back to an integer\r\n n = int(binary[(x * 8) : ((x * 8) + 8)], 2)\r\n # Special logic to handle null values\r\n if n == 0:\r\n new_string += \"\\\\x00\"\r\n # Otherwise, change those bits back to a character\r\n else:\r\n new_string += n.to_bytes((n.bit_length() + 7) // 8, \"big\").decode()\r\n\r\n return new_string",
"def decimal_binary(num):\n\treturn \"{:08b}\".format(num)",
"def string2bits(s=''):\n return [bin(ord(x))[2:].zfill(8) for x in s]",
"def bit_to_ascii(bit_string: str) -> str:\n assert len(bit_string) % 8 == 0\n\n n = int(bit_string, 2)\n result = n.to_bytes((n.bit_length() + 7) // 8, 'big').decode()\n return result",
"def binary_reversal(string):\n\tbinary = bin(int(string))\n\tnew_binary = binary.replace('b', '0')\n\tnew_binary = new_binary[::-1]\n\t\n\tdec = int(new_binary, 2)\n\tprint dec",
"def oct2dec(x):\n return int(x, 8)",
"def str_to_byn(str_):\n\n return ' '.join(bin(byte).lstrip('0b') for Item in str_ for byte in Item.encode())"
] | [
"0.75368214",
"0.70557326",
"0.69577855",
"0.69247776",
"0.6849758",
"0.6813903",
"0.6772026",
"0.67233974",
"0.66740793",
"0.6569174",
"0.64823663",
"0.6465421",
"0.64253926",
"0.63317066",
"0.6325535",
"0.6298426",
"0.61963886",
"0.61666733",
"0.6131182",
"0.61288166",
"0.6098404",
"0.60845244",
"0.6079765",
"0.6064355",
"0.6041874",
"0.6037334",
"0.6029483",
"0.6012141",
"0.600494",
"0.59497905"
] | 0.7644674 | 0 |
Convert octal string to decimal number. | def oct2dec(x):
return int(x, 8) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dec2int(r: str) -> int:",
"def octal_frac_to_decimal(octal_frac_string):\n result = 0.0\n for place, digit in enumerate(octal_frac_string, start=1):\n result += int(digit) * (8 ** -place)\n\n return result",
"def strToDec(string):\n\tstring = string.lstrip(\"0\")\n\tif len(string) == 0:\n\t\treturn 0\n\telse:\n\t\treturn eval(string)",
"def dec2oct(x):\n return oct(x)[_oct_index:]",
"def hex2int(r: str) -> int:",
"def to_octal(number: int) -> str:\n return oct(number).replace(\"0o\", \"\")",
"def bin2int(r: str) -> int:",
"def Oct(num):\n n = CInt(num)\n if n == 0:\n return \"0\"\n else:\n return oct(n)[2:]",
"def binary_to_num(s):\n if s == \"\":\n return 0\n return binary_to_num(s[:-1]) * 2 + (s[-1] == '1')",
"def convert_to_decimal(roman_input):\n def _get_next(r):\n for i in (2,1):\n if r[:i] in roman_to_decimal:\n return roman_to_decimal[r[:i]], r[i:]\n\n if not roman_input:\n return 0\n\n try:\n roman_input = roman_input.upper()\n current, rest = _get_next(roman_input)\n except:\n raise ValueError(roman_input)\n\n return current + convert_to_decimal(rest)",
"def to_decimal(binary):\n if len(re.findall('[0-1]+', binary)[0] )< 9:\n return int('0b'+binary, 2)\n return -1",
"def int2dec(n: int) -> str:",
"def test_octal_helpers(self, number, expected):\n self.assertEqual(positional.from_octal(expected), number)\n self.assertEqual(positional.to_octal(number), expected)\n self.assertEqual(positional.to_octal(str(number)), expected)",
"def hex2value10(hex_str):\n return hex2int(hex_str) / 10.0",
"def bin2oct(x):\n return oct(int(x, 2))[_oct_index:]",
"def test_binary_string_to_octal_string():\n obj = pmisc.binary_string_to_octal_string\n if sys.hexversion < 0x03000000:\n ref = (\n \"\\\\1\\\\0\\\\2\\\\0\\\\3\\\\0\\\\4\\\\0\\\\5\\\\0\\\\6\\\\0\\\\a\\\\0\"\n \"\\\\b\\\\0\\\\t\\\\0\\\\n\\\\0\\\\v\\\\0\\\\f\\\\0\\\\r\\\\0\\\\16\\\\0\"\n )\n actual = obj(\"\".join([struct.pack(\"h\", num) for num in range(1, 15)]))\n assert ref == actual\n else:\n ref = r\"\\o1\\0\\o2\\0\\o3\\0\\o4\\0\\o5\\0\\o6\\0\\a\\0\" r\"\\b\\0\\t\\0\\n\\0\\v\\0\\f\\0\\r\\0\\o16\\0\"\n code = lambda x: struct.pack(\"h\", x).decode(\"ascii\")\n actual = obj(\"\".join([code(num) for num in range(1, 15)]))\n assert ref == actual",
"def hex2oct(x):\n return oct(int(x, 16))[_oct_index:]",
"def _str_to_int(in_str):\n if in_str == '':\n return 0\n return int(in_str, 10)",
"def str2num(size, s):\n\n\ti = 0\n\tn = 0\n\twhile i < size:\n\n\t\tn = n | (ord(s[i]) << (i*8))\n\t\ti = i + 1\n\n\treturn n",
"def atoi(c: str) -> Union[int, str]:\n return int(c) if c.isdigit() else c",
"def oct2bin(x):\n return bin(int(x, 8))[2:]",
"def de_octal(msg):\n try:\n msglist = msg.split(' ')\n characters = []\n for octal in msglist:\n n = int(octal, base=8)\n characters.append(n)\n d_msg = ''\n for c in characters:\n d_msg += ''.join(chr(c))\n return d_msg\n except ValueError:\n print('Invalid octal-encoded message')",
"def string_to_digit(string, output):\n string = strip_space(string)\n if not string[0].isdigit() and not string[1].isdigit():\n return None\n\n string_items = []\n for index, item in enumerate(string):\n if item.isdigit():\n string_items.append(item)\n else:\n if item == ',':\n string_items.append('.')\n\n elif item == ' ' and string[index + 1].isdigit():\n pass\n\n elif not item.isdigit() and not string[index + 1].isdigit():\n break\n\n if '.' in string_items and output == int:\n return int(float(''.join(string_items)))\n\n return output(''.join(string_items))",
"def toint(s):\n try:\n n = int(s)\n except ValueError:\n n = 0\n return n if n >= 0 else 0",
"def string_to_int(s):\n return functools.reduce(lambda running_sum, c: running_sum * 10 + string.digits.index(c),\n s[s[0] == '-':], 0) * (-1 if s[0] == '' else 1)",
"def str2num(s):\r\n try:\r\n return int(s)\r\n except ValueError:\r\n return float(s)",
"def str_to_num(s):\n\n method = {\n \"float\": string.atof,\n \"int\": string.atoi\n }\n\n if not type(s) is StringType:\n return 0\n\n if \".\" in s:\n return method[\"float\"](s)\n else:\n return method[\"int\"](s, 10)",
"def read_dash_decimal(num_str):\n result = 0\n if '-' in num_str:\n val, dec = num_str.split('-')\n result = int(val) + int(dec)/10.0**(len(dec))\n else:\n result = int(num_str)\n\n return result",
"def binstr2dec(bstr):\n return int(bstr, base=2)",
"def hex2oct(x):\n # moreZero = random.choice(range(10))\n moreZero = 0\n return oct(int(x, 16)).zfill(moreZero + len(oct(int(x, 16)))).strip('L')"
] | [
"0.7270691",
"0.6982738",
"0.6802102",
"0.6558195",
"0.63187855",
"0.6228094",
"0.61978084",
"0.6157006",
"0.6082201",
"0.60479116",
"0.6019029",
"0.59776914",
"0.59653455",
"0.5915168",
"0.588256",
"0.5826446",
"0.5811062",
"0.5801341",
"0.5750719",
"0.5742125",
"0.5740209",
"0.5715364",
"0.5661726",
"0.56603396",
"0.5635041",
"0.56023604",
"0.558677",
"0.5564827",
"0.5554875",
"0.5554401"
] | 0.73984647 | 0 |
Convert octal string to hexadecimal string. | def oct2hex(x):
return hex(int(x, 8))[2:] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def int2hex(n: int) -> str:",
"def print_as_hex(s):\n print(\":\".join(\"{0:x}\".format(ord(c)) for c in s))",
"def hex(string):\n return string.encode('hex')",
"def hex2oct(x):\n # moreZero = random.choice(range(10))\n moreZero = 0\n return oct(int(x, 16)).zfill(moreZero + len(oct(int(x, 16)))).strip('L')",
"def int_to_hex(n):\r\n #return \"0x%X\" % n\r\n return hex(n)",
"def convert_to_hex(input_string: str):\n return \" \".join([hex(ord(ch))[2:] for ch in input_string])",
"def hex2oct(x):\n return oct(int(x, 16))[_oct_index:]",
"def bitstr_to_hex(a):\n return hex(bitstr_to_int(a))",
"def ascii_string_to_hex(input_string):\n\t\tascii_hex = [hex(ord(char))[2:] for char in input_string]\n\t\toutput_string = ' '.join(ascii_hex)\n\t\treturn output_string",
"def w__format_hex(self, string):\n d = map(None, string)\n d = map(ord, d)\n d = map(lambda x: \"%02x\" % x, d)\n return ' '.join(d)",
"def int_to_hex(a):\n return hex(a)",
"def convertirHexadecimal(self):\n self.convertir(lambda c: hex(ord(c))[2:], sep=' ')",
"def hex_string(s, n=32):\n # take first n characters, reverse them and get ascii codes with ord()\n return 'X\"{0:>0{1}}\"'.format(''.join(['{0:x}'.format(ord(c)) for c in s[:n][::-1]]), n * 2)",
"def hex(cls, x):\n return c_hex(x)",
"def bytes_to_hex(s):\n\n return s.encode(\"hex\")",
"def to_octal(number: int) -> str:\n return oct(number).replace(\"0o\", \"\")",
"def to_hex(text):\n return ' '.join([hex(ord(char)) for char in unicode(text, 'UTF-8')])",
"def conv_hex(num):\n\n if num < 10:\n return str(num)\n if num == 10:\n return 'A'\n if num == 11:\n return 'B'\n if num == 12:\n return 'C'\n if num == 13:\n return 'D'\n if num == 14:\n return 'E'\n if num == 15:\n return 'F'",
"def dec2hex(string_num):\n # if string_num.isdigit():\n hex_str = hex(string_num)\n hex_str = hex_str.replace('0x', '')\n if len(hex_str) < 2:\n hex_str = '0' + hex_str\n return hex_str",
"def _convert_hex(self, hex_value):\n if not isinstance(hex_value, str):\n raise TypeError(\"given hex value must be str\")\n m = HEX_RE.match(hex_value)\n if m is None:\n raise ValueError(\"given string does not seem to be Python hex\")\n sign_char, base, exp_sign, exp = [m.group(i) for i in range(1,5)]\n new_sign = \"+\" if sign_char is None else sign_char\n # Line below converts exp to hex value. The \"0x\" prefix is removed \n # with [2:]. The exponent is padded with (too many) zeros (Stata \n # requires 3 digits), and reduced to last 3 digits with [-3:].\n new_exp = (\"000\" + hex(int(exp))[2:])[-3:]\n return \"\".join((new_sign, base, 'X', exp_sign, new_exp))",
"def dec2hex(x):\n return hex(x)[2:]",
"def tohex(data: str) -> str:\n match = re.fullmatch(r\"^0[x|X][0-9a-fA-F]+\", data)\n if match:\n return data.lower()\n match = re.fullmatch(r\"^[0-9a-fA-F]+[h|H]$\", data)\n if not match:\n raise ValueError(f\"Required hex of the form `0x` or `H` found {data}\")\n match = re.match(r\"^[0-9a-fA-F]+\", data)\n return f\"0x{match.group().lower()}\"",
"def int_to_hex(num):\n return hex(num)",
"def bin2hex(x):\n return hex(int(x, 2))[2:]",
"def bin2hex(bit_line):\n return '{:x}'.format(int(bit_line,2))",
"def longtohex(n):\n\n plain=(re.match(r\"0x([0-9A-Fa-f]*)l?$\", hex(n), re.I).group(1)).lower()\n return \"0x\" + plain",
"def a2b_hex(string):\n\n if len(string) % 2 == 1:\n string = '0' + string\n\n try:\n return binascii.a2b_hex(string.encode('ascii'))\n except TypeError:\n raise Error('Invalid hexadecimal string')",
"def hexify(c):\n try:\n s = c.encode(\"utf-8\").encode(\"hex\")\n except UnicodeDecodeError:\n s = 0\n n = len(s)\n if n <= 2: return s\n a = ' - '.join([s[i:i+2] for i in range(0,n,2)])\n return a[:-1]",
"def hex_to_ascii_string(input_string):\n\t\tlist_of_hex = input_string.split(\" \")\n\t\tchars = [chr(int(char, 16)) for char in list_of_hex if char != ' ']\n\t\toutput_string = \"\".join(chars)\n\t\treturn output_string",
"def hexadecimal(runtime_addr, n=1):\n\n set_formatter(runtime_addr, n, mainformatter.hexadecimal_formatter)"
] | [
"0.7127962",
"0.6858865",
"0.67977035",
"0.66891956",
"0.66581327",
"0.6595217",
"0.65746325",
"0.6475897",
"0.6462714",
"0.6440759",
"0.6420401",
"0.63975835",
"0.6394663",
"0.6390578",
"0.6357414",
"0.63572913",
"0.6354585",
"0.6322785",
"0.6259033",
"0.62520534",
"0.6249573",
"0.6235628",
"0.6235408",
"0.6198544",
"0.61670667",
"0.6147486",
"0.6142038",
"0.61154985",
"0.6109438",
"0.60914725"
] | 0.79475677 | 0 |
Convert hexadecimal string to octal string. | def hex2oct(x):
return oct(int(x, 16))[_oct_index:] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def oct2hex(x):\n return hex(int(x, 8))[2:]",
"def hex2oct(x):\n # moreZero = random.choice(range(10))\n moreZero = 0\n return oct(int(x, 16)).zfill(moreZero + len(oct(int(x, 16)))).strip('L')",
"def to_octal(number: int) -> str:\n return oct(number).replace(\"0o\", \"\")",
"def test_binary_string_to_octal_string():\n obj = pmisc.binary_string_to_octal_string\n if sys.hexversion < 0x03000000:\n ref = (\n \"\\\\1\\\\0\\\\2\\\\0\\\\3\\\\0\\\\4\\\\0\\\\5\\\\0\\\\6\\\\0\\\\a\\\\0\"\n \"\\\\b\\\\0\\\\t\\\\0\\\\n\\\\0\\\\v\\\\0\\\\f\\\\0\\\\r\\\\0\\\\16\\\\0\"\n )\n actual = obj(\"\".join([struct.pack(\"h\", num) for num in range(1, 15)]))\n assert ref == actual\n else:\n ref = r\"\\o1\\0\\o2\\0\\o3\\0\\o4\\0\\o5\\0\\o6\\0\\a\\0\" r\"\\b\\0\\t\\0\\n\\0\\v\\0\\f\\0\\r\\0\\o16\\0\"\n code = lambda x: struct.pack(\"h\", x).decode(\"ascii\")\n actual = obj(\"\".join([code(num) for num in range(1, 15)]))\n assert ref == actual",
"def int2hex(n: int) -> str:",
"def hex(string):\n return string.encode('hex')",
"def de_octal(msg):\n try:\n msglist = msg.split(' ')\n characters = []\n for octal in msglist:\n n = int(octal, base=8)\n characters.append(n)\n d_msg = ''\n for c in characters:\n d_msg += ''.join(chr(c))\n return d_msg\n except ValueError:\n print('Invalid octal-encoded message')",
"def dec2oct(x):\n return oct(x)[_oct_index:]",
"def oct2bin(x):\n return bin(int(x, 8))[2:]",
"def hex_to_ascii_string(input_string):\n\t\tlist_of_hex = input_string.split(\" \")\n\t\tchars = [chr(int(char, 16)) for char in list_of_hex if char != ' ']\n\t\toutput_string = \"\".join(chars)\n\t\treturn output_string",
"def print_as_hex(s):\n print(\":\".join(\"{0:x}\".format(ord(c)) for c in s))",
"def convertirHexadecimal(self):\n self.convertir(lambda c: hex(ord(c))[2:], sep=' ')",
"def bin2oct(x):\n return oct(int(x, 2))[_oct_index:]",
"def oct2dec(x):\n return int(x, 8)",
"def Oct(num):\n n = CInt(num)\n if n == 0:\n return \"0\"\n else:\n return oct(n)[2:]",
"def convert_to_hex(input_string: str):\n return \" \".join([hex(ord(ch))[2:] for ch in input_string])",
"def hex2int(r: str) -> int:",
"def hex_string(s, n=32):\n # take first n characters, reverse them and get ascii codes with ord()\n return 'X\"{0:>0{1}}\"'.format(''.join(['{0:x}'.format(ord(c)) for c in s[:n][::-1]]), n * 2)",
"def _convert_hex(self, hex_value):\n if not isinstance(hex_value, str):\n raise TypeError(\"given hex value must be str\")\n m = HEX_RE.match(hex_value)\n if m is None:\n raise ValueError(\"given string does not seem to be Python hex\")\n sign_char, base, exp_sign, exp = [m.group(i) for i in range(1,5)]\n new_sign = \"+\" if sign_char is None else sign_char\n # Line below converts exp to hex value. The \"0x\" prefix is removed \n # with [2:]. The exponent is padded with (too many) zeros (Stata \n # requires 3 digits), and reduced to last 3 digits with [-3:].\n new_exp = (\"000\" + hex(int(exp))[2:])[-3:]\n return \"\".join((new_sign, base, 'X', exp_sign, new_exp))",
"def hackerrank_Python_String_print_formatted_decimal_octal_hex_binary():\n def print_formatted(number):\n # your code goes here\n\n padw = len(bin(number).lstrip(\"0b\"))\n for i in range(1, number+1):\n print(str(i).rjust(padw) + \" \" \\\n + str(oct(i).lstrip(\"0\")).rjust(padw) + \" \" \\\n + str(hex(i).lstrip(\"0x\").upper()).rjust(padw) + \" \" \\\n + str(bin(i).lstrip(\"0b\").rjust(padw)))\n\n print_formatted(20)\n # 1 1 1 1\n # 2 2 2 10\n # 3 3 3 11\n # 4 4 4 100 ...",
"def a2b_hex(string):\n\n if len(string) % 2 == 1:\n string = '0' + string\n\n try:\n return binascii.a2b_hex(string.encode('ascii'))\n except TypeError:\n raise Error('Invalid hexadecimal string')",
"def conv_hex(num):\n\n if num < 10:\n return str(num)\n if num == 10:\n return 'A'\n if num == 11:\n return 'B'\n if num == 12:\n return 'C'\n if num == 13:\n return 'D'\n if num == 14:\n return 'E'\n if num == 15:\n return 'F'",
"def Hex2Ascii(hexString):\n answer = \"\"\n for x in hexString:\n if x != 0:\n # Ignore 0x00 results\n answer += f\"{x:c}\"\n\n log.debug(f\"Hex {hexString} decoded to {answer}\")\n\n return answer",
"def ascii_string_to_hex(input_string):\n\t\tascii_hex = [hex(ord(char))[2:] for char in input_string]\n\t\toutput_string = ' '.join(ascii_hex)\n\t\treturn output_string",
"def w__format_hex(self, string):\n d = map(None, string)\n d = map(ord, d)\n d = map(lambda x: \"%02x\" % x, d)\n return ' '.join(d)",
"def dec2hex(string_num):\n # if string_num.isdigit():\n hex_str = hex(string_num)\n hex_str = hex_str.replace('0x', '')\n if len(hex_str) < 2:\n hex_str = '0' + hex_str\n return hex_str",
"def hexstring(self):\n if self.current != b\"<\":\n self.on_parser_error(\"Hexadecimal string expected\")\n self.next()\n token = b''\n self.maybe_spaces_or_comments()\n while self.is_hex_digit:\n token += self.next()\n self.maybe_spaces_or_comments()\n\n ch = self.next()\n if ch != b'>':\n self.on_parser_error(\"Wrong hexadecimal string\")\n if len(token) % 2:\n # if there is an odd number of digits - the last one should be assumed 0\n token += b'0'\n return HexString(token.decode(DEFAULT_ENCODING).upper())",
"def hexbyte(string):\n#\treturn repr(string)\n\ts = \"\"\n\tfor i in string:\n\t\tif (ord(i) >= ord('A') and ord(i) <= ord('z')) \\\n\t\t\tor (ord(i) >= ord('0') and ord(i) <= ord('9')) \\\n\t\t\tor (ord(i) == ord(\" \")):\n\t\t\ts += \"%s\" % i\n\t\telse:\n\t\t\ts += \"\\\\x%02x\" % ord(i)\n\n#\t\ts += \" \"\n\treturn s",
"def oct(space, w_val):\n # XXX does this need to be a space operation?\n return space.oct(w_val)",
"def int_to_hex(n):\r\n #return \"0x%X\" % n\r\n return hex(n)"
] | [
"0.7262856",
"0.6970385",
"0.6871056",
"0.6547219",
"0.6355148",
"0.626188",
"0.6197579",
"0.608063",
"0.60802263",
"0.6070935",
"0.6067904",
"0.6041254",
"0.6039679",
"0.6004906",
"0.5979743",
"0.59688586",
"0.59631056",
"0.5912364",
"0.5895775",
"0.5880666",
"0.5853619",
"0.58225167",
"0.5792236",
"0.5781505",
"0.57778007",
"0.5774096",
"0.5766716",
"0.5734086",
"0.5733604",
"0.57328427"
] | 0.7017675 | 1 |
Get the first identifier for the next month. | def _next_yymm_id(self, identifier: Identifier) -> Optional[Identifier]:
next_yymm_id = None
if identifier.year is not None and \
identifier.month is not None:
new_year = identifier.year
new_month = identifier.month + 1
new_num = 1
if new_month > 12:
new_month = 1
new_year = new_year + 1
if identifier.is_old_id:
next_yymm_id = '{}/{:02d}{:02d}{:03d}'.format(
identifier.archive, new_year % 100, new_month, new_num)
elif new_year >= 2015:
next_yymm_id = '{:02d}{:02d}.{:05d}'.format(
new_year % 100, new_month, new_num)
else:
next_yymm_id = '{:02d}{:02d}.{:04d}'.format(
new_year % 100, new_month, new_num)
try:
return Identifier(arxiv_id=next_yymm_id)
except IdentifierException:
return None
else:
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def first_of_next_month(ref_date):\n year, month = add_months(ref_date.year, ref_date.month, 1)\n return type(ref_date)(year, month, 1)",
"def _next_id(self, identifier: Identifier) -> Optional['Identifier']:\n next_id = None\n if identifier.year is not None and \\\n identifier.month is not None and \\\n identifier.num is not None:\n new_year = identifier.year\n new_month = identifier.month\n new_num = identifier.num + 1\n if (identifier.is_old_id and new_num > 999) \\\n or (not identifier.is_old_id\n and identifier.year < 2015\n and new_num > 9999) \\\n or (not identifier.is_old_id\n and identifier.year >= 2015 and new_num > 99999):\n new_num = 1\n new_month = new_month + 1\n if new_month > 12:\n new_month = 1\n new_year = new_year + 1\n\n if identifier.is_old_id:\n next_id = '{}/{:02d}{:02d}{:03d}'.format(\n identifier.archive, new_year % 100, new_month, new_num)\n else:\n if new_year >= 2015:\n next_id = '{:02d}{:02d}.{:05d}'.format(\n new_year % 100, new_month, new_num)\n else:\n next_id = '{:02d}{:02d}.{:04d}'.format(\n new_year % 100, new_month, new_num)\n try:\n return Identifier(arxiv_id=next_id)\n except IdentifierException:\n return None\n else:\n return None",
"def __next_month(self, year, month):\n year, month = (year, month + 1) if month < 12 else (year + 1, 1)\n\n return self.create(year, month)",
"def next_month(year, month):\n if month < 12:\n month += 1\n else:\n month = 1\n year += 1\n return year, month",
"def next_month(date):\n\n return date + datetime.timedelta(days=calendar.monthrange(date.year, date.month)[1])",
"def showNextMonth(self):\n pass",
"def get_next_day(self):\n pass",
"def first_month_day():\r\n return datetime.now().replace(day=1).strftime('%d-%m-%Y')",
"def first_day_of_month():\n first_object = datetime.utcnow()\n first_string = first_object.strftime('%m/01/%Y')\n return first_string",
"def next_month(startdate):\n import datetime\n if not startdate:\n startdate = datetime.datetime.now()\n startdate = startdate.replace(day=1)\n # If it's December then next month is January next year not month 13 of this year\n if startdate.month == 12:\n startdate = startdate.replace(month=1)\n startdate = startdate.replace(year=(startdate.year + 1))\n else:\n startdate = startdate.replace(month=(startdate.month + 1))\n return startdate",
"def _next_month(self):\n self._canvas.place_forget()\n\n year, month = self._date.year, self._date.month\n self._date = self._date + self.timedelta(\n days=calendar.monthrange(year, month)[1] + 1)\n self._date = self.datetime(self._date.year, self._date.month, 1)\n self._build_calendar() # reconstruct calendar",
"def getPinnedDayOfNextMonth(year, month, day):\n\tyear = year + (month / 12) # purposeful integer division\n\tmonth = (month % 12) + 1\n\tday = pinDayToMonth(year, month, day)\n\treturn datetime.date(year, month, day)",
"def next_identity(self) -> OrganisationId:\n ...",
"def next_month(dateobj):\n year_delta, old_month = divmod(dateobj.month, 12)\n return datetime.date(dateobj.year + year_delta, old_month + 1, 1)",
"def _ns_nextid(self):\n return self._ns(\"nextid\")",
"def next_identity(self) -> PublicationId:\n ...",
"def _next_month(self):\r\n self._canvas.place_forget()\r\n\r\n year, month = self._date.year, self._date.month\r\n self._date = self._date + self.timedelta(\r\n days=calendar.monthrange(year, month)[1] + 1)\r\n self._date = self.datetime(self._date.year, self._date.month, 1)\r\n self._build_calendar() # reconstruct calendar\r",
"def first_day_of_month(date):\n return date.replace(day=1)",
"def first_day_of_month(date):\n return date.replace(day=1)",
"def get_first_date(in_month=1):\n\n from_date = (today-relativedelta(months=in_month)).replace(day=1)\n \n return from_date",
"def get_next_month(self, startdate, fmt=None):\n days_in_start_month = calendar.monthrange(startdate.year, startdate.month)[1]\n first_day_next_month = startdate + datetime.timedelta(days=(days_in_start_month - startdate.day + 1))\n days_in_next_month = calendar.monthrange(first_day_next_month.year, first_day_next_month.month)[1]\n last_day_next_month = first_day_next_month + datetime.timedelta(days=(days_in_next_month - 1))\n next_month = (first_day_next_month, last_day_next_month)\n return next_month",
"def get_next(self, formatted=True):\n self.last += 1\n self.save()\n number_length = 5\n last_length = len(str(self.last))\n number_prefix = '0'*(number_length - last_length)\n prefixed_number = number_prefix + str(self.last)\n\n if formatted and self.format:\n formatted_id = self.format % prefixed_number\n else:\n formatted_id = '{year}-{number}'.format(year=date.today().year, number=prefixed_number)\n return formatted_id",
"def nextDay(year1, month1, day1):\n if day1 < 30:\n day1 += 1\n else:\n if month1 < 12:\n month1 += 1\n day1 = 1\n else:\n year1 += 1\n month1 = 1\n day1 = 1\n \n return(year1, month1, day1)",
"def reserve_next_run_id(self):\n query = \"SELECT NEXTVAL(pg_get_serial_sequence('task_history', 'run_id'))\"\n cur = self.conn.cursor()\n cur.execute(query)\n self.conn.commit()\n return cur.fetchone()[0]",
"def get_next_serial(self):\n T = time.gmtime()\n base = T[0] * 10000 + T[1] * 100 + T[2]\n s_base = self.serial // 100\n if s_base < base:\n return base * 100 # New day\n else:\n return self.serial + 1 # May cause future lap",
"def nextDay(year, month, day):\n if day < daysInMonth(year,month):\n return year, month, day + 1\n else:\n if month == 12:\n return year + 1, 1, 1\n else:\n return year, month + 1, 1",
"def reserve_next_agent_id(self):\n query = \"SELECT NEXTVAL(pg_get_serial_sequence('agents', 'agent_id'))\"\n cur = self.conn.cursor()\n cur.execute(query)\n self.conn.commit()\n return cur.fetchone()[0]",
"def next(self):\n to_return = str(self._current_year)\n self._current_year += 1\n return to_return",
"def advance_one(self):\n days_in_month = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n if self.is_leap_year():\n days_in_month[2] = 29\n if self.day == days_in_month[self.month]:\n if self.month==12:\n self.year+=1\n self.month=1\n else:\n self.month+=1\n self.day =1\n else:\n self.day+=1",
"def nextDay(year, month, day):\n if day < 30:\n day += 1\n else:\n if month < 12:\n month += 1\n day = 1\n else:\n year += 1\n month = 1\n day = 1\n \n return(year, month, day)"
] | [
"0.71140623",
"0.70819384",
"0.65288323",
"0.64989054",
"0.64522034",
"0.6431146",
"0.6364784",
"0.63500136",
"0.63453865",
"0.62631005",
"0.6257072",
"0.6244404",
"0.6200054",
"0.6192637",
"0.6188998",
"0.6186235",
"0.6151943",
"0.61216503",
"0.61216503",
"0.61076903",
"0.60252434",
"0.5975461",
"0.5971931",
"0.5949029",
"0.5924709",
"0.59055823",
"0.58754987",
"0.5873702",
"0.5800226",
"0.57937044"
] | 0.74007684 | 0 |
Get previous consecutive Identifier relative to provided Identifier. | def _previous_id(self, identifier: Identifier) -> Optional['Identifier']:
previous_id = None
if identifier.year is not None and \
identifier.month is not None and \
identifier.num is not None:
new_year = identifier.year
new_month = identifier.month
new_num = identifier.num - 1
if new_num == 0:
new_month = new_month - 1
if new_month == 0:
new_month = 12
new_year = new_year - 1
if identifier.is_old_id:
if new_num == 0:
new_num = 999
previous_id = '{}/{:02d}{:02d}{:03d}'.format(
identifier.archive, new_year % 100, new_month, new_num)
else:
if new_year >= 2015:
if new_num == 0:
new_num = 99999
previous_id = '{:02d}{:02d}.{:05d}'.format(
new_year % 100, new_month, new_num)
else:
if new_num == 0:
new_num = 9999
previous_id = '{:02d}{:02d}.{:04d}'.format(
new_year % 100, new_month, new_num)
try:
return Identifier(arxiv_id=previous_id)
except IdentifierException:
return None
else:
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_previous_id(identifier: Identifier) -> Optional[Identifier]:\n return current_session().get_previous_id(identifier)",
"def getPreviousElement(self,currentId):\n\tids = self.getObjectIds()\n\tpreviousId = None\n\tfor id in ids:\n\t if id == currentId:\n\t\treturn previousId\n\t else:\n\t\tpreviousId = id\n\treturn None",
"def previous(self):\n return Reference(\":\".join(self.names[:-2]))",
"def get_previous_id(self, identifier: Identifier) -> Optional[Identifier]:\n previous_id = self._previous_id(identifier)\n if not previous_id:\n return None\n\n if identifier.year == previous_id.year \\\n and identifier.month == previous_id.month:\n return previous_id\n\n path = self._get_parent_path(previous_id)\n if not os.path.exists(path):\n return None\n\n for _, _, file_list in os.walk(path):\n abs_files = [f[:-4] for f in file_list if f.endswith('.abs')]\n if not abs_files:\n return None\n max_id = max(abs_files)\n try:\n if previous_id.is_old_id:\n short_id = Identifier(\n arxiv_id=f'{previous_id.archive}/{max_id}')\n else:\n short_id = Identifier(arxiv_id=max_id)\n return short_id\n\n except IdentifierException:\n return None\n\n return None",
"def _previous(self):\n return self.token_list[self._current - 1]",
"def get_previous_index(self, current_index_string):\n # current index is a string, so cast to int\n current_index = int(current_index_string)\n\n return current_index-1",
"def get_previous(self):\n return self._next_previous_helper('previous')",
"def get_previous_step(self):\n return self.get_step_by_index(-2)",
"def prev(self):\n return self.from_date(self.date_a - datetime.timedelta(1))",
"def getPreviousElement(self,givenElement):\n\tpreviousElement = None\n\tfor element in self.getElements():\n\t if element==givenElement:\n\t\treturn previousElement\n\t previousElement=element\n\treturn None",
"def get_previous(self):\n return self.previous",
"def get_next_identifier(self) -> int:\n if self.items:\n return self.items[-1].identifier + 1\n else:\n return 1",
"def DecodePreviousInstruction(ea):\n insn = ida_ua.insn_t()\n prev_addr = ida_ua.decode_prev_insn(insn, ea)\n return insn if prev_addr != ida_idaapi.BADADDR else None",
"def get_prev_seg(*args):\n return _ida_segment.get_prev_seg(*args)",
"def getPrevious(self):\n return self.__previous__",
"def previous_id(self):\n try:\n return Report.objects.filter(id__lt=self.id).order_by(\"-id\").first().id\n except Exception:\n return False",
"def getPrev(crossing):\n return",
"def _previous(self, coord):\n candidates = [(coord[0] - 1, coord[1]), (coord[0] + 1, coord[1]), (coord[0], coord[1] - 1), (coord[0], coord[1] + 1)]\n for candidate in (x for x in candidates if 0 <= x[0] < self.dimension and 0 <= x[1] < self.dimension):\n if self.board[candidate[0]][candidate[1]].next == self.board[coord[0]][coord[1]]:\n return candidate",
"def previous_symbol(self):\r\n if self.position == 0:\r\n return None\r\n return self.rule.rightside[self.position-1]",
"def previous_player(current_player, players):\n if len(players) == 1:\n return players[0]\n if current_player != players[0]:\n return players[players.index(current_player) - 1]\n return players[-1]",
"def get_previous_block(self):\r\n return self.chain[-1] # Return the previous block\r",
"def PreviousApplicationIdentity(self) -> _n_0_t_2:",
"def previous(self):\n if self.currentframe > 0:\n return self.getframe(self.currentframe - 1)\n else:\n newobj = pixiimage()\n newobj.read(previous_filename(\n self.sequencefilename))\n return newobj",
"def previous(self):\n return self.my_previous",
"def prev(self):\n seg = Segment(segment_t=idaapi.get_prev_seg(self.ea))\n\n if seg.ea >= self.ea:\n raise exceptions.NoMoreSegments(\"This is the first segment. no segments exist before it.\")\n\n return seg",
"def get_previous_observation(self):\n if len(self.observation_history) == 1:\n return None\n else:\n return self.observation_history[-2]",
"def get_prev(self):\n return self.prev",
"def get_previous(self, limit, offset):\r\n if offset - limit < 0:\r\n return None\r\n\r\n return self._generate_uri(limit, offset - limit)",
"def locate_predecessor(self, key):\r\n index = self.search(key)\r\n return index-1",
"def getPreviousObservation(self):\n\n if (len(self.observationHistory) <= 1):\n return None\n\n return self.observationHistory[-2]"
] | [
"0.68398184",
"0.6584527",
"0.63730377",
"0.63493836",
"0.6142223",
"0.6125821",
"0.61153346",
"0.597462",
"0.5952775",
"0.5942755",
"0.5874705",
"0.5858013",
"0.5850779",
"0.57981914",
"0.5797494",
"0.57876164",
"0.57495373",
"0.5744229",
"0.5709335",
"0.56956834",
"0.5666189",
"0.5647077",
"0.564254",
"0.560626",
"0.55994636",
"0.5583495",
"0.5581953",
"0.5580193",
"0.5553495",
"0.5546125"
] | 0.6983114 | 0 |
Parse arXiv .abs file. | def parse_abs_file(filename: str) -> DocMetadata:
try:
with open(filename, mode='r', encoding='latin-1') as absf:
raw = absf.read()
except FileNotFoundError:
raise AbsNotFoundException
except UnicodeDecodeError as e:
# TODO: log this
raise AbsParsingException(
f'Failed to decode .abs file "{filename}": {e}')
# TODO: clean up
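# file modification time, read as US/Eastern and normalized to UTC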
modified = datetime.fromtimestamp(
os.path.getmtime(filename), tz=gettz('US/Eastern'))
modified = modified.astimezone(tz=tzutc())
# there are two main components to an .abs file that contain data,
# but the split must always return four components
components = RE_ABS_COMPONENTS.split(raw)
if len(components) > 4:
components = alt_component_split(components)
if not len(components) == 4:
raise AbsParsingException(
'Unexpected number of components parsed from .abs.')
# everything else is in the second main component
prehistory, misc_fields = re.split(r'\n\n', components[1])
fields: Dict[str, Any] = \
AbsMetaSession._parse_metadata_fields(key_value_block=misc_fields)
# abstract is the first main component
fields['abstract'] = components[2]
id_match = RE_ARXIV_ID_FROM_PREHISTORY.match(prehistory)
if not id_match:
raise AbsParsingException(
'Could not extract arXiv ID from prehistory component.')
arxiv_id = id_match.group('arxiv_id')
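# drop the identifier line; the rest holds the submitter line and the version entries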
prehistory = re.sub(r'^.*\n', '', prehistory)
parsed_version_entries = re.split(r'\n', prehistory)
# submitter data
from_match = RE_FROM_FIELD.match(parsed_version_entries.pop(0))
if not from_match:
raise AbsParsingException('Could not extract submitter data.')
name = from_match.group('name')
if name is not None:
name = name.rstrip()
email = from_match.group('email')
# get the version history for this particular version of the document
if not parsed_version_entries:
raise AbsParsingException('At least one version entry expected.')
(version, version_history, arxiv_id_v) \
= AbsMetaSession._parse_version_entries(
arxiv_id=arxiv_id,
version_entry_list=parsed_version_entries)
arxiv_identifier = Identifier(arxiv_id=arxiv_id)
# named (key-value) fields
missing_fields = [rf for rf in REQUIRED_FIELDS if rf not in fields]
if missing_fields:
raise AbsParsingException(
f'missing required field(s): {", ".join(missing_fields)}')
# some transformations
category_list: List[str] = []
primary_category = None
if 'categories' in fields and fields['categories']:
category_list = fields['categories'].split()
if category_list[0] in taxonomy.CATEGORIES:
primary_category = Category(category_list[0])
primary_archive = \
Archive(
taxonomy.CATEGORIES[primary_category.id]['in_archive'])
elif arxiv_identifier.is_old_id:
primary_archive = Archive(arxiv_identifier.archive)
else:
raise AbsException('Cannot infer archive from identifier.')
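# fall back to the default License when the .abs record carries no license field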
doc_license: License = \
License() if 'license' not in fields else License(
recorded_uri=fields['license'])
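# raw_safe: the raw record with the submitter's email stripped from the From line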
raw_safe = re.sub(RE_FROM_FIELD, r'\g<from>\g<name>', raw, 1)
return DocMetadata(
raw_safe=raw_safe,
arxiv_id=arxiv_id,
arxiv_id_v=arxiv_id_v,
arxiv_identifier=Identifier(arxiv_id=arxiv_id),
title=fields['title'],
abstract=fields['abstract'],
authors=AuthorList(fields['authors']),
submitter=Submitter(name=name, email=email),
categories=fields['categories'] if 'categories' in fields else None,
primary_category=primary_category,
primary_archive=primary_archive,
primary_group=Group(
taxonomy.ARCHIVES[primary_archive.id]['in_group']),
secondary_categories=[
Category(x) for x in category_list[1:]
],
journal_ref=None if 'journal_ref' not in fields
else fields['journal_ref'],
report_num=None if 'report_num' not in fields
else fields['report_num'],
doi=None if 'doi' not in fields else fields['doi'],
acm_class=None if 'acm_class' not in fields else
fields['acm_class'],
msc_class=None if 'msc_class' not in fields else
fields['msc_class'],
proxy=None if 'proxy' not in fields else fields['proxy'],
comments=fields['comments'] if 'comments' in fields else None,
version=version,
license=doc_license,
version_history=version_history,
modified=modified
# private=private # TODO, not implemented
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load(filename):\n o = open(filename)\n s = o.read()\n a = ArffFile.parse(s)\n o.close()\n return a",
"def parser(path):\n\t\n\tdata = Arff()\n\tdata.read_arff(path)\n\t\n\treturn data",
"def read_abinit(filename='abinit.in'):\n\n from ase import Atoms, units\n\n if isinstance(filename, str):\n f = open(filename)\n else: # Assume it's a file-like object\n f = filename\n\n lines = f.readlines()\n if type(filename) == str:\n f.close()\n\n full_file = ''\n for line in lines:\n if '#' in line:\n meat, comment = line.split('#')\n else:\n meat = line\n full_file = full_file + meat + ' '\n\n full_file.strip()\n tokens = full_file.lower().split()\n\n # note that the file can not be scanned sequentially\n\n index = tokens.index(\"acell\")\n unit = 1.0\n if(tokens[index+4].lower()[:3] != 'ang'):\n unit = units.Bohr\n acell = [unit*float(tokens[index+1]),\n unit*float(tokens[index+2]),\n unit*float(tokens[index+3])]\n\n index = tokens.index(\"natom\")\n natom = int(tokens[index+1])\n\n index = tokens.index(\"ntypat\")\n ntypat = int(tokens[index+1])\n\n index = tokens.index(\"typat\")\n typat = []\n for i in range(natom):\n typat.append(int(tokens[index+1+i]))\n\n index = tokens.index(\"znucl\")\n znucl = []\n for i in range(ntypat):\n znucl.append(int(tokens[index+1+i]))\n\n index = tokens.index(\"rprim\")\n rprim = []\n for i in range(3):\n rprim.append([acell[i]*float(tokens[index+3*i+1]),\n acell[i]*float(tokens[index+3*i+2]),\n acell[i]*float(tokens[index+3*i+3])])\n\n # create a list with the atomic numbers\n numbers = []\n for i in range(natom):\n ii = typat[i] - 1\n numbers.append(znucl[ii])\n\n # now the positions of the atoms\n if \"xred\" in tokens:\n index = tokens.index(\"xred\")\n xred = []\n for i in range(natom):\n xred.append([float(tokens[index+3*i+1]),\n float(tokens[index+3*i+2]),\n float(tokens[index+3*i+3])])\n atoms = Atoms(cell=rprim, scaled_positions=xred, numbers=numbers,\n pbc=True)\n else:\n if \"xcart\" in tokens:\n index = tokens.index(\"xcart\")\n unit = units.Bohr\n elif \"xangst\" in tokens:\n unit = 1.0\n index = tokens.index(\"xangst\")\n else:\n raise IOError(\n \"No xred, xcart, or xangs keyword in abinit input file\")\n\n xangs = []\n for i in range(natom):\n xangs.append([unit*float(tokens[index+3*i+1]),\n unit*float(tokens[index+3*i+2]),\n unit*float(tokens[index+3*i+3])])\n atoms = Atoms(cell=rprim, positions=xangs, numbers=numbers, pbc=True)\n \n try:\n i = tokens.index('nsppol')\n except ValueError:\n nsppol = None\n else:\n nsppol = int(tokens[i + 1])\n\n if nsppol == 2:\n index = tokens.index('spinat')\n magmoms = [float(tokens[index + 3 * i + 3]) for i in range(natom)]\n atoms.set_initial_magnetic_moments(magmoms)\n\n return atoms",
"def _parse_ach_file(self, contents):\n file_length = len(contents)\n\n for index in range(0, file_length, self.LINE_LENGTH):\n line = contents[index:index + self.LINE_LENGTH]\n\n if line.startswith('1'):\n self._read_header(line)\n elif line.startswith('5'):\n self._read_batch_header(line)\n elif line.startswith('6'):\n self._read_entry_detail(line)\n elif line.startswith('7'):\n self._read_addenda_record(line)\n elif line.startswith('8'):\n self._read_batch_control_record(line)\n elif line.startswith('9'):\n if line == '9' * 94:\n continue\n self._read_file_control_record(line)",
"def atmparamread(filename):\n f = open(filename, 'r')\n f.readline()\n line = f.readline()\n #Td = float(line.split()[0])\n #Pd = float(line.split()[1])\n #Mc = float(line.split()[2])\n #rc = float(line.split()[3])\n n = int(line.split()[0])\n f.readline()\n atm = 0*numpy.ndarray(shape=(n, ncol), dtype=float)\n S = 0*numpy.ndarray(shape=(n), dtype=float)\n for i in range(n):\n line = f.readline()\n S[i] = float(line.split()[0])\n for j in range(ncol ):\n atm[i, j] = float(line.split()[j+1])\n f.close()\n return atm, S",
"def parse(self):\n try:\n self.validate()\n except Exception as e:\n raise AssetmapError(e)\n\n tree = ET.parse(self.path)\n root = tree.getroot()\n # ElementTree prepends the namespace to all elements, so we need to extract\n # it so that we can perform sensible searching on elements.\n assetmap_ns = get_namespace(root.tag)\n\n self.id = get_element_text(root, \"Id\", assetmap_ns).split(\":\")[2]\n self.annotation_text = get_element_text(root, \"AnnotationText\", assetmap_ns)\n self.volume_count = int(get_element_text(root, \"VolumeCount\", assetmap_ns))\n self.issue_date = parse_date(get_element_text(root, \"IssueDate\", assetmap_ns))\n self.issuer = get_element_text(root, \"Issuer\", assetmap_ns)\n self.creator = get_element_text(root, \"Creator\", assetmap_ns)\n\n asset_list = get_element(root, \"AssetList\", assetmap_ns)\n # Get the data from the ASSETMAP file\n for asset in asset_list.getchildren():\n asset_id = get_element_text(asset, \"Id\", assetmap_ns).split(\":\")[2]\n for chunklist in get_element_iterator(asset, \"ChunkList\", assetmap_ns):\n \"\"\"\n The code below assumes that there will only ever be one chunk in a chunklist. Chunking is\n used to split files up into smaller parts, usually in order to provide compatability with older\n filesystems, which is not applicable for our uses.\n \"\"\"\n for chunk in chunklist.getchildren():\n v = get_element_text(chunk, \"VolumeIndex\", assetmap_ns)\n o = get_element_text(chunk, \"Offset\", assetmap_ns)\n l = get_element_text(chunk, \"Length\", assetmap_ns)\n\n a = {\n \"path\": get_element_text(chunk, \"Path\", assetmap_ns),\n \"volume_index\": int(v) if v is not None else v,\n \"offset\": int(o) if o is not None else o,\n \"length\": int(l) if l is not None else l\n }\n\n self.assets[asset_id] = AssetData(**a)",
"def _read_arf(file):\n with fits.open(file) as hdul:\n data = hdul[1].data\n\n return data['energ_lo'], data['energ_hi'], data['specresp']",
"def readArff(filename):\n \n data = []\n labels = []\n\n def parseLine(line): # csv.reader could not do this.\n isopen = False\n current = ''\n for c in line:\n if c == \"'\":\n if isopen:\n yield current\n current = ''\n isopen = not isopen\n elif isopen:\n current += c\n\n #with filename.open() as f:\n with bz2.open(str(filename)+'.bz2', 'r') as f:\n \n line = ''\n while line != '@data':\n line = f.readline().decode().strip()\n if line.startswith(\"@attribute 'classification'\"):\n line = line[line.find('{') + 1:line.find('}')]\n classes = {i:n for n,i in enumerate(parseLine(line))}\n\n for line in f.read().decode().splitlines():\n record = list(parseLine(line))\n labels.append(classes[record[-1]])\n data.append([int(x) for x in record[:-1]])\n return numpy.array(data, dtype=float), numpy.array(labels), classes",
"def main():\n\n\tfilename = sys.argv[1]\n\tarrayabsurdit = ArrayAbsurdit(filename)\n\tarrayabsurdit.read_file()",
"def parse_elamx_file(filename):\n laminate_list = []\n lamina_dict = {}\n tree = ET.parse(filename)\n root = tree.getroot()\n laminates = root.find('laminates') # laminates in the file\n materials = root.find('materials') # laminae in the file\n for material in materials:\n lamina_dict[material.attrib['uuid']] = parse_elamx_material(material)\n for laminate in laminates:\n laminate_list.append(parse_elamx_laminate(laminate, lamina_dict))\n return laminate_list",
"def parse_file_sax(infile):\n \n from xml.sax import make_parser\n\n ## instantiate the XML handler\n handler = ModXMLHandler()\n parser = make_parser()\n ## associate the handler with the parser\n parser.setContentHandler(handler)\n\n #infile = open(file,'r')\n \n ## actually parse the file\n parser.parse(infile)\n infile.close()\n\n local = []\n fetch = []\n \n for data in [handler.getAlignment(), handler.getReference()] + handler.getDerived():\n ## data will be a 2-tuple with containing two strings. The first one is the name of a file\n ## and the second is the URL of that file\n \n ## sometimes, there won't be a URL (and data[1].strip() will be None) if the file can be fetched\n ## from the PDB\n if data[1].strip():\n loc = DBPuppet.getURL(data[1], data[0])\n ## append the name of the file you will write to the 'local' list\n local.append(loc)\n else:\n ## needs to be fetched from the web\n fetch.append(\"%s\" % str(data[0]) )\n \n ## open the files..\n openInChimera(local, fetch)",
"def parse_facs_files():\n\n #Load parser settings\n parser_settings = getattr(settings,'FACS_PARSER_SETTINGS')\n\n files_to_parse = [parser_settings['facs_source_directory']+f for f in os.listdir(parser_settings['facs_source_directory']) if '.exp' in f]\n\n for filename in files_to_parse: \n\n #Compute MD5 hash\n facs_file = file(filename,'rbU')\n md5hash = hashlib.md5(facs_file.read()).hexdigest()\n facs_file.close()\n \n #Skip file if previously parsed.\n if FacsFile.objects.filter(original_filename=filename,md5hash=md5hash):\n print 'Skipping ', filename\n continue\n\n #Open file, remove null bytes and prepare csv reader\n facs_file = file(filename, 'rU')\n csv_reader = csv.reader((x.replace('\\0', '') for x in facs_file),dialect=csv.excel_tab)\n\n #Reader header\n csv_header = csv_reader.next()\n facs_file_results = []\n\n #Parse the file\n for csv_row in csv_reader:\n if csv_row[0]:\n facs_file_results.append(dict(zip(csv_header,csv_row)))\n\n #Close the file\n facs_file.close()\n\n #Save the information to database and archive file\n random_ints = ''.join([str(random.randint(0,9)) for n in range(10)])\n archive_filename = parser_settings['facs_archive_directory'] + filename.split('/')[-1][:-4].split('_')[0] + '_' + random_ints + '.exp'\n shutil.move(filename, archive_filename)\n\n facs_file = FacsFile(\n original_filename = filename,\n md5hash = md5hash,\n archive_filename = archive_filename,\n )\n facs_file.save()\n\n #Remove empty elements\n for result in facs_file_results:\n for key, data in result.items():\n if data == '.' or not(data):\n del result[key]\n\n #Cache test code and interface mappings\n test_codes = []\n for testcode_mapping in TestCodeMapping.objects.filter(interface_name=parser_settings['testcode_interface_name']):\n test_code = testcode_mapping.code\n code = test_code.code\n code_mapping = testcode_mapping.code_mapping\n\n test_codes.append((code, code_mapping, test_code))\n\n #Add results to database\n for result in facs_file_results:\n\n #Parse result date\n result_date = dateutil.parser.parse(result[parser_settings['result_datetime']])\n result_error_code = getattr(result, parser_settings['error_codes'], '')\n result_identifier = result[parser_settings['sample_identifier']]\n result_cytometer = result[parser_settings['cytometer_serial']]\n\n #Create the dictionnary of result items.\n new_result_item_dict = {}\n for test_code, facs_file_column, test_code_object in test_codes:\n new_result_item_dict[test_code] = ResultItem(\n test_code = test_code_object,\n result_item_value = result[facs_file_column],\n error_code = result_error_code,\n result_item_datetime = result_date,\n )\n\n #Search for possible duplicate result\n is_duplicate = False\n for possible_duplicate in FacsResult.objects.filter(result_identifier=result_identifier):\n if possible_duplicate.get_resultitem_dict() == new_result_item_dict:\n is_duplicate = True\n break\n\n #Save result and result item to data if it is not a duplicate\n if not is_duplicate:\n \n new_result = FacsResult(\n result_identifier=result_identifier,\n result_datetime=result_date,\n origin_facs_file=facs_file,\n cytometer_serial_number=result_cytometer,\n )\n \n new_result.save()\n \n #Add the reference to the result for each item and save it to database.\n for item in new_result_item_dict.values():\n item.result = new_result\n item.save()\n\n new_result.link_to_requisition()",
"def extractAnimationsFromXar( strFilename ):\n print( \"INF: extractAnimationFromXar: parsing '%s'\" % strFilename );\n allAnims = dict();\n xar = xml.dom.minidom.parse( strFilename );\n choregrapheNode = xar.childNodes[0]; # first is \"ChoregrapheProject\"\n strXarVersion = choregrapheNode.getAttribute( \"xar_version\" );\n print( \"strXarVersion: %s\" % strXarVersion );\n #~ print( domNodeToString( choregrapheNode ) );\n # look for root box\n for node in choregrapheNode.childNodes:\n if( node.nodeType != xml.dom.minidom.Node.TEXT_NODE and node.hasAttribute( \"name\" ) ):\n if( node.getAttribute( \"name\" ) == \"root\" ):\n break;\n else:\n return False;\n rootNode = node;\n #~ print( domNodeToString( rootNode ) );\n listNodesBox = findElementByName( rootNode, \"Box\" ); # find all elements with a specific name, and return them in an array\n print( \"listNodesBox found: %d\" % len( listNodesBox ) );\n #~ print( domNodeToString( listNodesBox[8] ) );\n for node in listNodesBox:\n strAnimationName = node.getAttribute( \"name\" );\n strAnimationName = strAnimationName.replace( \" \", \"_\" );\n listTimeline = findElementByName( node, \"Timeline\" );\n #~ print( domNodeToString( listTimeline[0] ) );\n listNames = [];\n listTimes = [];\n listPositions = []; \n for timeline in listTimeline:\n if( len(listTimeline) > 1 ):\n print( \"ERR: more than one timeline in a box: not handled case! (strAnimationName:%s)\" % strAnimationName );\n return;\n #~ print( str( timeline.attributes ) );\n #~ print( domNodeToString( timeline ) );\n #~ print( domAttributesToString( timeline ) );\n nFps = int( timeline.getAttribute( \"fps\" ) );\n #~ print( \"fps: %d\" % nFps );\n listActuator = findElementByName( timeline, \"ActuatorCurve\" );\n for actuator in listActuator:\n strActuatorName = str(actuator.getAttribute( \"actuator\" )); # str => remove unicode\n listNames.append( strActuatorName );\n listKey = findElementByName( actuator, \"Key\" );\n keyTimes = [];\n keyPositions = [];\n if( len(listKey) < 1 ):\n print( \"WRN: extractAnimationFromXar: in the box %s, the joint %s is used but no keys are defined for it, removing it from the used joint list...\" % ( strAnimationName, strActuatorName ) );\n del listNames[-1];\n continue;\n for key in listKey:\n rKeyNumber = float( key.getAttribute( \"frame\" ) );\n rKeyVal = float( key.getAttribute( \"value\" ) ) * math.pi/180;\n keyTimes.append( rKeyNumber / nFps );\n listTangent = findElementByName( actuator, \"Tangent\" );\n if( len( listTangent ) == 0 ):\n keyPositions.append( rKeyVal ); # no splines there\n else:\n keyPositions.append( [rKeyVal] ); # prepare for appending spline info\n for tangent in listTangent:\n #~ print( domAttributesToString( tangent ) );\n strInterpType=tangent.getAttribute( \"interpType\" );\n strSide=tangent.getAttribute( \"strSide\" );\n rAbscissaParam=float( tangent.getAttribute( \"abscissaParam\" ) )/nFps;\n rOrdinateParam=float( tangent.getAttribute( \"ordinateParam\" ) ) * math.pi/180;\n if( strInterpType == \"linear\" ):\n keyPositions[-1].append( [1,rAbscissaParam,rOrdinateParam] ); # todo, validate parameters!\n elif( strInterpType == \"bezier\" ):\n keyPositions[-1].append( [2,rAbscissaParam,rOrdinateParam] ); # todo, validate parameters!\n else:\n print( \"ERR: extractAnimationFromXar: this type isn't handled: '%s'\" % strInterpType );\n listTimes.append( keyTimes );\n listPositions.append( keyPositions );\n # for actuator\n allAnims[strAnimationName] = [listNames,listTimes,listPositions];\n # for timeline \n # for node\n print( 
\"INF: extractAnimationFromXar: exiting with %d anim(s)\" % len(allAnims) );\n return allAnims;",
"def parse(self, infile):\r\n raise NotImplementedError()",
"def atmprofileread(filename):\n f = open(filename, 'r')\n line1 = f.readline()\n Nst = int(line1.split()[-1])\n line = f.readline()\n Np = int(line.split()[1])\n atm = 0*numpy.ndarray(shape=(Nst, Np, 5), dtype=float)\n S = 0*numpy.ndarray(shape=(Nst), dtype=float)\n f = open(filename, 'r')\n f.readline()\n for i in range(Nst):\n line = f.readline()\n S[i] = float(line.split()[0])\n for j in range(Np):\n line = f.readline()\n for k in range(numpy.shape(atm)[-1]):\n atm[i, j, k] = float(line.split()[k])\n f.close()\n return atm, S",
"def parse_file(axmlfile, **kwargs):\n adm = ADM()\n from .common_definitions import load_common_definitions\n load_common_definitions(adm)\n load_axml_file(adm, axmlfile, **kwargs)\n return adm",
"def parse_afos(self):\n # at most, only look at the top four lines\n data = \"\\n\".join([line.strip()\n for line in self.sections[0].split(\"\\n\")[:4]])\n tokens = re.findall(\"^([A-Z0-9 ]{4,6})$\", data, re.M)\n if tokens:\n self.afos = tokens[0]",
"def _read_arasim_antenna_data(filename):\n data = {}\n freqs = set()\n thetas = set()\n phis = set()\n freq = 0\n with open(filename) as f:\n for line in f:\n words = line.split()\n if line.startswith('freq'):\n freq = 1\n if words[-1]==\"Hz\":\n pass\n elif words[-1]==\"kHz\":\n freq *= 1e3\n elif words[-1]==\"MHz\":\n freq *= 1e6\n elif words[-1]==\"GHz\":\n freq *= 1e9\n else:\n raise ValueError(\"Cannot parse line: '\"+line+\"'\")\n freq *= float(words[-2])\n freqs.add(freq)\n elif line.startswith('SWR'):\n swr = float(words[-1])\n elif len(words)==5 and words[0]!=\"Theta\":\n theta = int(words[0])\n thetas.add(theta)\n phi = int(words[1])\n phis.add(phi)\n db_gain = float(words[2])\n # AraSim actually only seems to use the sqrt of the gain\n # (must be gain in power, not voltage)\n # gain = np.sqrt(float(words[3]))\n gain = np.sqrt(10**(db_gain/10))\n phase = np.radians(float(words[4]))\n data[(freq, theta, phi)] = (gain, phase)\n\n # Convert data dictionary into 3-D array of responses\n response = np.empty((len(freqs), len(thetas), len(phis)),\n dtype=np.complex_)\n for i, freq in enumerate(sorted(freqs)):\n for j, theta in enumerate(sorted(thetas)):\n for k, phi in enumerate(sorted(phis)):\n gain, phase = data[(freq, theta, phi)]\n response[i, j, k] = gain * np.exp(1j*phase)\n\n response_data = (response, np.array(sorted(freqs)),\n np.array(sorted(thetas)), np.array(sorted(phis)))\n return _fix_response_wrapping(response_data)",
"def parse_azimuth_elevation(filename):\n match = REGEX.match(filename)\n return int(match.group(1)), int(match.group(2))",
"def parse(self, filehandle):\n l = filehandle.readline()\n if l.split()[0] != '##maf':\n return\n else:\n self.setpar(l.split()[1:])\n\n l=filehandle.readline()\n while l:\n la = l.split()\n## print la\n if(len(la)==0 or la[0]=='#'):\n## print \"skipping\"\n 1\n elif(la[0]=='a'):\n## print \"reading alignment\"\n self.readalign(la[1:], filehandle)\n else:\n## print \"end of records\"\n return\n\n l=filehandle.readline()",
"def test_parse_rarefaction_fname(self):\r\n fname = \"alpha_rarefaction_900_3.txt\"\r\n base, seqs, iter, ext = parse_rarefaction_fname(fname)\r\n self.assertEqual((base, seqs, iter, ext),\r\n (\"alpha_rarefaction\", 900, 3, \".txt\"))",
"def readFastaFile(filename):",
"def _read_aberrations(SCA):\n from .. import meta_data\n from . import pixel_scale, n_pix, pixel_scale_mm\n\n # Construct filename.\n sca_str = '_%02d'%SCA\n infile = os.path.join(meta_data.share_dir, 'roman',\n zemax_filepref + sca_str + zemax_filesuff)\n\n # Read in data.\n dat = np.loadtxt(infile)\n # It actually has 5 field positions, not just 1, to allow us to make position-dependent PSFs\n # within an SCA eventually. Put it in the required format: an array of length (5 field\n # positions, 23 Zernikes), with the first entry empty (Zernike polynomials are 1-indexed so we\n # use entries 1-22). The units are waves.\n aberrations = np.zeros((5,23))\n aberrations[:,1:] = dat[:,5:]\n # Also get the field position. The file gives it in mm with respect to the center, but we\n # want it in pixels with respect to the corner. The pixel size of the detector is 0.01 mm/pixel\n # The y-coordinates have the opposite signs to the corresponding WFI location, explained \n # in the Roman file.\n\n x_sca_pos = dat[:,1]/pixel_scale_mm + n_pix/2\n y_sca_pos = n_pix/2 - dat[:,2]/pixel_scale_mm\n return aberrations, x_sca_pos, y_sca_pos",
"def _parse(self, infile):\n raise NotImplementedError()",
"def fromfileobj(cls, fileobj, fullparse=True):\n buf = fileobj.read(_ArInfoStruct.size)\n if not buf:\n return None\n\n if len(buf) < _ArInfoStruct.size:\n raise IOError(\n 'not enough data for header, got %r, needed %r' % (\n len(buf), _ArInfoStruct.size))\n\n name, mtime, uid, gid, mode, datasize, magic = _ArInfoStruct.unpack(buf)\n\n datasize = int(datasize)\n if fullparse:\n mtime = int(mtime)\n uid = int(uid)\n gid = int(gid)\n mode = int(mode, 8)\n\n if name.startswith('#1/'):\n arformat = AR_FORMAT_BSD\n\n try:\n filenamesize = int(name[3:])\n except ValueError:\n raise IOError('invalid file name length: %r' % name[3:])\n\n filename = fileobj.read(filenamesize)\n if len(filename) != filenamesize:\n raise IOError(\n 'not enough data for filename, got %r, needed %r' % (\n len(name), filenamesize))\n\n filesize = datasize - filenamesize\n\n elif name.startswith('/'):\n arformat = AR_FORMAT_SYSV\n raise SystemError('%s format is not supported.' % arformat)\n\n else:\n arformat = AR_FORMAT_SIMPLE\n filename = name.strip()\n filesize = datasize\n\n if magic != AR_MAGIC_BIT:\n raise IOError('file magic invalid, got %r, needed %r' % (\n magic, AR_MAGIC_BIT))\n\n return cls(\n arformat, filename.decode('utf-8'), filesize, mtime, uid, gid, mode)",
"def read_log_esc(ac_id, filename):\n f = open(filename, 'r')\n pattern = re.compile(\"(\\S+) \"+ac_id+\" ESC (\\S+) (\\S+) (\\S+) (\\S+) (\\S+) (\\S+) (\\S+)\")\n list_meas = []\n while True:\n line = f.readline().strip()\n if line == '':\n break\n m = re.match(pattern, line)\n if m:\n list_meas.append([float(m.group(1)), float(m.group(2)), float(m.group(3)), float(m.group(4)), float(m.group(5)), float(m.group(6)), \n float(m.group(7)), float(m.group(8))])\n return np.array(list_meas)",
"def _scan_axograph_file(self):\n\n self.info = {}\n\n with open(self.filename, 'rb') as fid:\n f = StructFile(fid)\n\n self.logger.debug('filename: {}'.format(self.filename))\n self.logger.debug('')\n\n # the first 4 bytes are always a 4-character file type identifier\n # - for early versions of AxoGraph, this identifier was 'AxGr'\n # - starting with AxoGraph X, the identifier is 'axgx'\n header_id = f.read(4).decode('utf-8')\n self.info['header_id'] = header_id\n assert header_id in ['AxGr', 'axgx'], \\\n 'not an AxoGraph binary file! \"{}\"'.format(self.filename)\n\n self.logger.debug('header_id: {}'.format(header_id))\n\n # the next two numbers store the format version number and the\n # number of data columns to follow\n # - for 'AxGr' files, these numbers are 2-byte unsigned short ints\n # - for 'axgx' files, these numbers are 4-byte long ints\n # - the 4-character identifier changed from 'AxGr' to 'axgx' with\n # format version 3\n if header_id == 'AxGr':\n format_ver, n_cols = f.read_f('HH')\n assert format_ver == 1 or format_ver == 2, \\\n 'mismatch between header identifier \"{}\" and format ' \\\n 'version \"{}\"!'.format(header_id, format_ver)\n elif header_id == 'axgx':\n format_ver, n_cols = f.read_f('ll')\n assert format_ver >= 3, \\\n 'mismatch between header identifier \"{}\" and format ' \\\n 'version \"{}\"!'.format(header_id, format_ver)\n else:\n raise NotImplementedError(\n 'unimplemented file header identifier \"{}\"!'.format(\n header_id))\n self.info['format_ver'] = format_ver\n self.info['n_cols'] = n_cols\n\n self.logger.debug('format_ver: {}'.format(format_ver))\n self.logger.debug('n_cols: {}'.format(n_cols))\n self.logger.debug('')\n\n ##############################################\n # BEGIN COLUMNS\n\n sig_memmaps = []\n sig_channels = []\n for i in range(n_cols):\n\n self.logger.debug('== COLUMN INDEX {} =='.format(i))\n\n ##############################################\n # NUMBER OF DATA POINTS IN COLUMN\n\n n_points = f.read_f('l')\n\n self.logger.debug('n_points: {}'.format(n_points))\n\n ##############################################\n # COLUMN TYPE\n\n # depending on the format version, data columns may have a type\n # - prior to version 3, column types did not exist and data was\n # stored in a fixed pattern\n # - beginning with version 3, several data types are available\n # as documented in AxoGraph_ReadWrite.h\n if format_ver == 1 or format_ver == 2:\n col_type = None\n elif format_ver >= 3:\n col_type = f.read_f('l')\n else:\n raise NotImplementedError(\n 'unimplemented file format version \"{}\"!'.format(\n format_ver))\n\n self.logger.debug('col_type: {}'.format(col_type))\n\n ##############################################\n # COLUMN NAME AND UNITS\n\n # depending on the format version, column titles are stored\n # differently\n # - prior to version 3, column titles were stored as\n # fixed-length 80-byte Pascal strings\n # - beginning with version 3, column titles are stored as\n # variable-length strings (see StructFile.read_string for\n # details)\n if format_ver == 1 or format_ver == 2:\n title = f.read_f('80p').decode('utf-8')\n elif format_ver >= 3:\n title = f.read_f('S')\n else:\n raise NotImplementedError(\n 'unimplemented file format version \"{}\"!'.format(\n format_ver))\n\n self.logger.debug('title: {}'.format(title))\n\n # units are given in parentheses at the end of a column title,\n # unless units are absent\n if len(title.split()) > 0 and title.split()[-1][0] == '(' and \\\n title.split()[-1][-1] == ')':\n name = ' 
'.join(title.split()[:-1])\n units = title.split()[-1].strip('()')\n else:\n name = title\n units = ''\n\n self.logger.debug('name: {}'.format(name))\n self.logger.debug('units: {}'.format(units))\n\n ##############################################\n # COLUMN DTYPE, SCALE, OFFSET\n\n if format_ver == 1:\n\n # for format version 1, all columns are arrays of floats\n\n dtype = 'f'\n gain, offset = 1, 0 # data is neither scaled nor off-set\n\n elif format_ver == 2:\n\n # for format version 2, the first column is a \"series\" of\n # regularly spaced values specified merely by a first value\n # and an increment, and all subsequent columns are arrays\n # of shorts with a scaling factor\n\n if i == 0:\n\n # series\n first_value, increment = f.read_f('ff')\n\n self.logger.debug(\n 'interval: {}, freq: {}'.format(\n increment, 1 / increment))\n self.logger.debug(\n 'start: {}, end: {}'.format(\n first_value,\n first_value + increment * (n_points - 1)))\n\n # assume this is the time column\n t_start, sampling_period = first_value, increment\n self.info['t_start'] = t_start\n self.info['sampling_period'] = sampling_period\n\n self.logger.debug('')\n\n continue # skip memmap, chan info for time col\n\n else:\n\n # scaled short\n dtype = 'h'\n gain, offset = \\\n f.read_f('f'), 0 # data is scaled without offset\n\n elif format_ver >= 3:\n\n # for format versions 3 and later, the column type\n # determines how the data should be read\n # - column types 1, 2, 3, and 8 are not defined in\n # AxoGraph_ReadWrite.h\n # - column type 9 is different from the others in that it\n # represents regularly spaced values\n # (such as times at a fixed frequency) specified by a\n # first value and an increment, without storing a large\n # data array\n\n if col_type == 9:\n\n # series\n first_value, increment = f.read_f('dd')\n\n self.logger.debug(\n 'interval: {}, freq: {}'.format(\n increment, 1 / increment))\n self.logger.debug(\n 'start: {}, end: {}'.format(\n first_value,\n first_value + increment * (n_points - 1)))\n\n if i == 0:\n\n # assume this is the time column\n t_start, sampling_period = first_value, increment\n self.info['t_start'] = t_start\n self.info['sampling_period'] = sampling_period\n\n self.logger.debug('')\n\n continue # skip memmap, chan info for time col\n\n else:\n\n raise NotImplementedError(\n 'series data are supported only for the first '\n 'data column (time)!')\n\n elif col_type == 4:\n\n # short\n dtype = 'h'\n gain, offset = 1, 0 # data neither scaled nor off-set\n\n elif col_type == 5:\n\n # long\n dtype = 'l'\n gain, offset = 1, 0 # data neither scaled nor off-set\n\n elif col_type == 6:\n\n # float\n dtype = 'f'\n gain, offset = 1, 0 # data neither scaled nor off-set\n\n elif col_type == 7:\n\n # double\n dtype = 'd'\n gain, offset = 1, 0 # data neither scaled nor off-set\n\n elif col_type == 10:\n\n # scaled short\n dtype = 'h'\n gain, offset = f.read_f('dd') # data scaled w/ offset\n\n else:\n\n raise NotImplementedError(\n 'unimplemented column type \"{}\"!'.format(col_type))\n\n else:\n\n raise NotImplementedError(\n 'unimplemented file format version \"{}\"!'.format(\n format_ver))\n\n ##############################################\n # COLUMN MEMMAP AND CHANNEL INFO\n\n # create a memory map that allows accessing parts of the file\n # without loading it all into memory\n array = np.memmap(\n self.filename,\n mode='r',\n dtype=f.byte_order + dtype,\n offset=f.tell(),\n shape=n_points)\n\n # advance the file position to after the data array\n f.seek(array.nbytes, 1)\n\n if i == 
0:\n # assume this is the time column containing n_points values\n\n # verify times are spaced regularly\n diffs = np.diff(array)\n increment = np.median(diffs)\n max_frac_step_deviation = np.max(np.abs(\n diffs / increment - 1))\n tolerance = 1e-3\n if max_frac_step_deviation > tolerance:\n self.logger.debug('largest proportional deviation '\n 'from median step size in the first '\n 'column exceeds the tolerance '\n 'of ' + str(tolerance) + ':'\n ' ' + str(max_frac_step_deviation))\n raise ValueError('first data column (assumed to be '\n 'time) is not regularly spaced')\n\n first_value = array[0]\n\n self.logger.debug(\n 'interval: {}, freq: {}'.format(\n increment, 1 / increment))\n self.logger.debug(\n 'start: {}, end: {}'.format(\n first_value,\n first_value + increment * (n_points - 1)))\n\n t_start, sampling_period = first_value, increment\n self.info['t_start'] = t_start\n self.info['sampling_period'] = sampling_period\n\n self.logger.debug('')\n\n continue # skip saving memmap, chan info for time col\n\n else:\n # not a time column\n\n self.logger.debug('gain: {}, offset: {}'.format(gain, offset))\n self.logger.debug('initial data: {}'.format(\n array[:5] * gain + offset))\n\n # channel_info will be cast to _signal_channel_dtype\n channel_info = (\n name, str(i), 1 / sampling_period, f.byte_order + dtype,\n units, gain, offset, '0')\n\n self.logger.debug('channel_info: {}'.format(channel_info))\n self.logger.debug('')\n\n sig_memmaps.append(array)\n sig_channels.append(channel_info)\n\n # END COLUMNS\n ##############################################\n\n # initialize lists for events and epochs\n raw_event_timestamps = []\n raw_epoch_timestamps = []\n raw_epoch_durations = []\n event_labels = []\n epoch_labels = []\n\n # the remainder of the file may contain metadata, events and epochs\n try:\n\n ##############################################\n # COMMENT\n\n self.logger.debug('== COMMENT ==')\n\n comment = f.read_f('S')\n self.info['comment'] = comment\n\n self.logger.debug(comment if comment else 'no comment!')\n self.logger.debug('')\n\n ##############################################\n # NOTES\n\n self.logger.debug('== NOTES ==')\n\n notes = f.read_f('S')\n self.info['notes'] = notes\n\n self.logger.debug(notes if notes else 'no notes!')\n self.logger.debug('')\n\n ##############################################\n # TRACES\n\n self.logger.debug('== TRACES ==')\n\n n_traces = f.read_f('l')\n self.info['n_traces'] = n_traces\n\n self.logger.debug('n_traces: {}'.format(n_traces))\n self.logger.debug('')\n\n trace_header_info_list = {}\n group_ids = []\n for i in range(n_traces):\n\n # AxoGraph traces are 1-indexed in GUI, so use i+1 below\n self.logger.debug('== TRACE #{} =='.format(i + 1))\n\n trace_header_info = {}\n\n if format_ver < 6:\n # before format version 6, there was only one version\n # of the header, and version numbers were not provided\n trace_header_info['trace_header_version'] = 1\n else:\n # for format versions 6 and later, the header version\n # must be read\n trace_header_info['trace_header_version'] = \\\n f.read_f('l')\n\n if trace_header_info['trace_header_version'] == 1:\n TraceHeaderDescription = TraceHeaderDescriptionV1\n elif trace_header_info['trace_header_version'] == 2:\n TraceHeaderDescription = TraceHeaderDescriptionV2\n else:\n raise NotImplementedError(\n 'unimplemented trace header version \"{}\"!'.format(\n trace_header_info['trace_header_version']))\n\n for key, fmt in TraceHeaderDescription:\n trace_header_info[key] = f.read_f(fmt)\n # AxoGraph 
traces are 1-indexed in GUI, so use i+1 below\n trace_header_info_list[i + 1] = trace_header_info\n group_ids.append(\n trace_header_info['group_id_for_this_trace'])\n\n self.logger.debug(trace_header_info)\n self.logger.debug('')\n self.info['trace_header_info_list'] = trace_header_info_list\n\n ##############################################\n # GROUPS\n\n self.logger.debug('== GROUPS ==')\n\n n_groups = f.read_f('l')\n self.info['n_groups'] = n_groups\n group_ids = \\\n np.sort(list(set(group_ids))) # remove duplicates and sort\n assert n_groups == len(group_ids), \\\n 'expected group_ids to have length {}: {}'.format(\n n_groups, group_ids)\n\n self.logger.debug('n_groups: {}'.format(n_groups))\n self.logger.debug('group_ids: {}'.format(group_ids))\n self.logger.debug('')\n\n group_header_info_list = {}\n for i in group_ids:\n\n # AxoGraph groups are 0-indexed in GUI, so use i below\n self.logger.debug('== GROUP #{} =='.format(i))\n\n group_header_info = {}\n\n if format_ver < 6:\n # before format version 6, there was only one version\n # of the header, and version numbers were not provided\n group_header_info['group_header_version'] = 1\n else:\n # for format versions 6 and later, the header version\n # must be read\n group_header_info['group_header_version'] = \\\n f.read_f('l')\n\n if group_header_info['group_header_version'] == 1:\n GroupHeaderDescription = GroupHeaderDescriptionV1\n else:\n raise NotImplementedError(\n 'unimplemented group header version \"{}\"!'.format(\n group_header_info['group_header_version']))\n\n for key, fmt in GroupHeaderDescription:\n group_header_info[key] = f.read_f(fmt)\n # AxoGraph groups are 0-indexed in GUI, so use i below\n group_header_info_list[i] = group_header_info\n\n self.logger.debug(group_header_info)\n self.logger.debug('')\n self.info['group_header_info_list'] = group_header_info_list\n\n ##############################################\n # UNKNOWN\n\n self.logger.debug('>> UNKNOWN 1 <<')\n\n # 36 bytes of undeciphered data (types here are guesses)\n unknowns = f.read_f('9l')\n\n self.logger.debug(unknowns)\n self.logger.debug('')\n\n ##############################################\n # EPISODES\n\n self.logger.debug('== EPISODES ==')\n\n # a subset of episodes can be selected for \"review\", or\n # episodes can be paged through one by one, and the indexes of\n # those currently in review appear in this list\n episodes_in_review = []\n n_episodes = f.read_f('l')\n self.info['n_episodes'] = n_episodes\n for i in range(n_episodes):\n episode_bool = f.read_f('Z')\n if episode_bool:\n episodes_in_review.append(i + 1)\n self.info['episodes_in_review'] = episodes_in_review\n\n self.logger.debug('n_episodes: {}'.format(n_episodes))\n self.logger.debug('episodes_in_review: {}'.format(\n episodes_in_review))\n\n if format_ver == 5:\n\n # the test file for version 5 contains this extra list of\n # episode indexes with unknown purpose\n old_unknown_episode_list = []\n n_episodes2 = f.read_f('l')\n for i in range(n_episodes2):\n episode_bool = f.read_f('Z')\n if episode_bool:\n old_unknown_episode_list.append(i + 1)\n\n self.logger.debug('old_unknown_episode_list: {}'.format(\n old_unknown_episode_list))\n if n_episodes2 != n_episodes:\n self.logger.debug(\n 'n_episodes2 ({}) and n_episodes ({}) '\n 'differ!'.format(n_episodes2, n_episodes))\n\n # another list of episode indexes with unknown purpose\n unknown_episode_list = []\n n_episodes3 = f.read_f('l')\n for i in range(n_episodes3):\n episode_bool = f.read_f('Z')\n if episode_bool:\n 
unknown_episode_list.append(i + 1)\n\n self.logger.debug('unknown_episode_list: {}'.format(\n unknown_episode_list))\n if n_episodes3 != n_episodes:\n self.logger.debug(\n 'n_episodes3 ({}) and n_episodes ({}) '\n 'differ!'.format(n_episodes3, n_episodes))\n\n # episodes can be masked to be removed from the pool of\n # reviewable episodes completely until unmasked, and the\n # indexes of those currently masked appear in this list\n masked_episodes = []\n n_episodes4 = f.read_f('l')\n for i in range(n_episodes4):\n episode_bool = f.read_f('Z')\n if episode_bool:\n masked_episodes.append(i + 1)\n self.info['masked_episodes'] = masked_episodes\n\n self.logger.debug('masked_episodes: {}'.format(\n masked_episodes))\n if n_episodes4 != n_episodes:\n self.logger.debug(\n 'n_episodes4 ({}) and n_episodes ({}) '\n 'differ!'.format(n_episodes4, n_episodes))\n self.logger.debug('')\n\n ##############################################\n # UNKNOWN\n\n self.logger.debug('>> UNKNOWN 2 <<')\n\n # 68 bytes of undeciphered data (types here are guesses)\n unknowns = f.read_f('d 9l d 4l')\n\n self.logger.debug(unknowns)\n self.logger.debug('')\n\n ##############################################\n # FONTS\n\n if format_ver >= 6:\n font_categories = ['axis titles', 'axis labels (ticks)',\n 'notes', 'graph title']\n else:\n # would need an old version of AxoGraph to determine how it\n # used these settings\n font_categories = ['everything (?)']\n\n font_settings_info_list = {}\n for i in font_categories:\n\n self.logger.debug('== FONT SETTINGS FOR {} =='.format(i))\n\n font_settings_info = {}\n for key, fmt in FontSettingsDescription:\n font_settings_info[key] = f.read_f(fmt)\n\n # I don't know why two arbitrary values were selected to\n # represent this switch, but it seems they were\n # - setting1 could contain other undeciphered data as a\n # bitmask, like setting2\n assert font_settings_info['setting1'] in \\\n [FONT_BOLD, FONT_NOT_BOLD], \\\n 'expected setting1 ({}) to have value FONT_BOLD ' \\\n '({}) or FONT_NOT_BOLD ({})'.format(\n font_settings_info['setting1'],\n FONT_BOLD,\n FONT_NOT_BOLD)\n\n # size is stored 10 times bigger than real value\n font_settings_info['size'] = \\\n font_settings_info['size'] / 10.0\n font_settings_info['bold'] = \\\n bool(font_settings_info['setting1'] == FONT_BOLD)\n font_settings_info['italics'] = \\\n bool(font_settings_info['setting2'] & FONT_ITALICS)\n font_settings_info['underline'] = \\\n bool(font_settings_info['setting2'] & FONT_UNDERLINE)\n font_settings_info['strikeout'] = \\\n bool(font_settings_info['setting2'] & FONT_STRIKEOUT)\n font_settings_info_list[i] = font_settings_info\n\n self.logger.debug(font_settings_info)\n self.logger.debug('')\n self.info['font_settings_info_list'] = font_settings_info_list\n\n ##############################################\n # X-AXIS SETTINGS\n\n self.logger.debug('== X-AXIS SETTINGS ==')\n\n x_axis_settings_info = {}\n for key, fmt in XAxisSettingsDescription:\n x_axis_settings_info[key] = f.read_f(fmt)\n self.info['x_axis_settings_info'] = x_axis_settings_info\n\n self.logger.debug(x_axis_settings_info)\n self.logger.debug('')\n\n ##############################################\n # UNKNOWN\n\n self.logger.debug('>> UNKNOWN 3 <<')\n\n # 108 bytes of undeciphered data (types here are guesses)\n unknowns = f.read_f('8l 3d 13l')\n\n self.logger.debug(unknowns)\n self.logger.debug('')\n\n ##############################################\n # EVENTS / TAGS\n\n self.logger.debug('=== EVENTS / TAGS ===')\n\n n_events, n_events_again = 
f.read_f('ll')\n self.info['n_events'] = n_events\n\n self.logger.debug('n_events: {}'.format(n_events))\n\n # event / tag timing is stored as an index into time\n raw_event_timestamps = []\n event_labels = []\n for i in range(n_events_again):\n event_index = f.read_f('l')\n raw_event_timestamps.append(event_index)\n n_events_yet_again = f.read_f('l')\n for i in range(n_events_yet_again):\n title = f.read_f('S')\n event_labels.append(title)\n\n event_list = []\n for event_label, event_index in \\\n zip(event_labels, raw_event_timestamps):\n # t_start shouldn't be added here\n event_time = event_index * sampling_period\n event_list.append({\n 'title': event_label,\n 'index': event_index,\n 'time': event_time})\n self.info['event_list'] = event_list\n for event in event_list:\n self.logger.debug(event)\n self.logger.debug('')\n\n ##############################################\n # UNKNOWN\n\n self.logger.debug('>> UNKNOWN 4 <<')\n\n # 28 bytes of undeciphered data (types here are guesses)\n unknowns = f.read_f('7l')\n\n self.logger.debug(unknowns)\n self.logger.debug('')\n\n ##############################################\n # EPOCHS / INTERVAL BARS\n\n self.logger.debug('=== EPOCHS / INTERVAL BARS ===')\n\n n_epochs = f.read_f('l')\n self.info['n_epochs'] = n_epochs\n\n self.logger.debug('n_epochs: {}'.format(n_epochs))\n\n epoch_list = []\n for i in range(n_epochs):\n epoch_info = {}\n for key, fmt in EpochInfoDescription:\n epoch_info[key] = f.read_f(fmt)\n epoch_list.append(epoch_info)\n self.info['epoch_list'] = epoch_list\n\n # epoch / interval bar timing and duration are stored in\n # seconds, so here they are converted to (possibly non-integer)\n # indexes into time to fit into the procrustean beds of\n # _rescale_event_timestamp and _rescale_epoch_duration\n raw_epoch_timestamps = []\n raw_epoch_durations = []\n epoch_labels = []\n for epoch in epoch_list:\n raw_epoch_timestamps.append(\n epoch['t_start'] / sampling_period)\n raw_epoch_durations.append(\n (epoch['t_stop'] - epoch['t_start']) / sampling_period)\n epoch_labels.append(epoch['title'])\n self.logger.debug(epoch)\n self.logger.debug('')\n\n ##############################################\n # UNKNOWN\n\n self.logger.debug(\n '>> UNKNOWN 5 (includes y-axis plot ranges) <<')\n\n # lots of undeciphered data\n rest_of_the_file = f.read()\n\n self.logger.debug(rest_of_the_file)\n self.logger.debug('')\n\n self.logger.debug('End of file reached (expected)')\n\n except EOFError as e:\n if format_ver == 1 or format_ver == 2:\n # for format versions 1 and 2, metadata like graph display\n # information was stored separately in the \"resource fork\"\n # of the file, so reaching the end of the file before all\n # metadata is parsed is expected\n self.logger.debug('End of file reached (expected)')\n pass\n else:\n # for format versions 3 and later, there should be metadata\n # stored at the end of the file, so warn that something may\n # have gone wrong, but try to continue anyway\n self.logger.warning('End of file reached unexpectedly '\n 'while parsing metadata, will attempt '\n 'to continue')\n self.logger.debug(e, exc_info=True)\n pass\n\n except UnicodeDecodeError as e:\n # warn that something went wrong with reading a string, but try\n # to continue anyway\n self.logger.warning('Problem decoding text while parsing '\n 'metadata, will ignore any remaining '\n 'metadata and attempt to continue')\n self.logger.debug(e, exc_info=True)\n pass\n\n self.logger.debug('')\n\n ##############################################\n # RAWIO HEADER\n\n 
# event_channels will be cast to _event_channel_dtype\n event_channels = []\n event_channels.append(('AxoGraph Tags', '', 'event'))\n event_channels.append(('AxoGraph Intervals', '', 'epoch'))\n\n if len(sig_channels) > 0:\n signal_streams = [('Signals', '0')]\n else:\n signal_streams = []\n\n # organize header\n self.header['nb_block'] = 1\n self.header['nb_segment'] = [1]\n self.header['signal_streams'] = np.array(signal_streams, dtype=_signal_stream_dtype)\n self.header['signal_channels'] = np.array(sig_channels, dtype=_signal_channel_dtype)\n self.header['event_channels'] = np.array(event_channels, dtype=_event_channel_dtype)\n self.header['spike_channels'] = np.array([], dtype=_spike_channel_dtype)\n\n ##############################################\n # DATA OBJECTS\n\n # organize data\n self._sampling_period = sampling_period\n self._t_start = t_start\n self._raw_signals = [sig_memmaps] # first index is seg_index\n self._raw_event_epoch_timestamps = [\n np.array(raw_event_timestamps),\n np.array(raw_epoch_timestamps)]\n self._raw_event_epoch_durations = [\n None,\n np.array(raw_epoch_durations)]\n self._event_epoch_labels = [\n np.array(event_labels, dtype='U'),\n np.array(epoch_labels, dtype='U')]",
"def read_log_airdata(ac_id, filename):\n f = open(filename, 'r')\n pattern = re.compile(\"(\\S+) \"+ac_id+\" AIR_DATA (\\S+) (\\S+) (\\S+) (\\S+) (\\S+) (\\S+) (\\S+)\")\n list_meas = []\n while True:\n line = f.readline().strip()\n if line == '':\n break\n m = re.match(pattern, line)\n if m:\n list_meas.append([float(m.group(1)), float(m.group(2)), float(m.group(3)), float(m.group(4)), float(m.group(5)), float(m.group(6)), \n float(m.group(7)), float(m.group(8))])\n return np.array(list_meas)",
"def test_convert_azfp_01a_matlab_raw(azfp_path):\n azfp_01a_path = azfp_path / '17082117.01A'\n azfp_xml_path = azfp_path / '17041823.XML'\n azfp_matlab_data_path = azfp_path / 'from_matlab/17082117_matlab_Data.mat'\n azfp_matlab_output_path = azfp_path / 'from_matlab/17082117_matlab_Output_Sv.mat'\n\n # Convert file\n echodata = open_raw(\n raw_file=azfp_01a_path, sonar_model='AZFP', xml_path=azfp_xml_path\n )\n\n # Read in the dataset that will be used to confirm working conversions. (Generated by Matlab)\n ds_matlab = loadmat(azfp_matlab_data_path)\n ds_matlab_output = loadmat(azfp_matlab_output_path)\n\n # Test beam group\n # frequency\n assert np.array_equal(\n ds_matlab['Data']['Freq'][0][0].squeeze(),\n echodata[\"Sonar/Beam_group1\"].frequency_nominal / 1000,\n ) # matlab file in kHz\n # backscatter count\n assert np.array_equal(\n np.array(\n [ds_matlab_output['Output'][0]['N'][fidx] for fidx in range(4)]\n ),\n echodata[\"Sonar/Beam_group1\"].backscatter_r.values,\n )\n\n # Test vendor group\n # Test temperature\n assert np.array_equal(\n np.array([d[4] for d in ds_matlab['Data']['Ancillary'][0]]).squeeze(),\n echodata[\"Vendor_specific\"].ancillary.isel(ancillary_len=4).values,\n )\n assert np.array_equal(\n np.array([d[0] for d in ds_matlab['Data']['BatteryTx'][0]]).squeeze(),\n echodata[\"Vendor_specific\"].battery_tx,\n )\n assert np.array_equal(\n np.array(\n [d[0] for d in ds_matlab['Data']['BatteryMain'][0]]\n ).squeeze(),\n echodata[\"Vendor_specific\"].battery_main,\n )\n # tilt x-y\n assert np.array_equal(\n np.array([d[0] for d in ds_matlab['Data']['Ancillary'][0]]).squeeze(),\n echodata[\"Vendor_specific\"].tilt_x_count,\n )\n assert np.array_equal(\n np.array([d[1] for d in ds_matlab['Data']['Ancillary'][0]]).squeeze(),\n echodata[\"Vendor_specific\"].tilt_y_count,\n )\n\n # check convention-required variables in the Platform group\n check_platform_required_scalar_vars(echodata)",
"def maf2vcf_mrefs(maf):\n f = open(maf + \".aa\", 'w')\n with open(maf, 'r') as maf:\n for line in maf:\n if line.startswith(\"a\"):\n ancallele = ''\n refout = ''\n line = next(maf)\n while line.startswith(\"s\"):\n if \"Wb\" in line:\n aa = line.split()\n pos = int(aa[2])\n size = int(aa[5])\n chrom = aa[1].split(\".\")[1]\n if \"-\" in aa[4]:\n if aa[6] == 'A':\n rallele = 'T'\n elif aa[6] == 'T':\n rallele = 'A'\n elif aa[6] == 'C':\n rallele = 'G'\n elif aa[6] == 'G':\n rallele = 'C'\n else:\n print(\"ERROR allele not iupac\")\n pos_1 = size - pos\n else:\n pos_1 = pos\n rallele = aa[6]\n else:\n # read in other refs\n aa = line.split()\n refout += aa[1][0]\n if \"-\" in aa[4]:\n # flip to opposite base\n if aa[6] == 'A':\n ancallele += 'T'\n elif aa[6] == 'T':\n ancallele += 'A'\n elif aa[6] == 'C':\n ancallele += 'G'\n elif aa[6] == 'G':\n ancallele += 'C'\n else:\n print(\"ERROR allele not iupac\")\n else:\n ancallele += aa[6]\n line = next(maf)\n if ancallele:\n f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(chrom, pos_1 + 1,\n rallele, ancallele,\n refout))\n else:\n pass\n return(None)"
] | [
"0.6293335",
"0.5960045",
"0.5839446",
"0.56938064",
"0.5470229",
"0.54585195",
"0.5442415",
"0.5383238",
"0.5345072",
"0.5325224",
"0.53108054",
"0.530532",
"0.5262298",
"0.5240521",
"0.5239216",
"0.5221165",
"0.5207952",
"0.51851124",
"0.51766455",
"0.51730996",
"0.5159004",
"0.51385146",
"0.5131645",
"0.5116151",
"0.5115011",
"0.5096909",
"0.5094884",
"0.50937426",
"0.50857115",
"0.5080156"
] | 0.7341478 | 0 |
Get a specific version of a paper's abstract metadata. | def _get_version(self, identifier: Identifier,
version: Optional[int] = None) -> DocMetadata:
parent_path = self._get_parent_path(identifier=identifier,
version=version)
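# the latest version is stored without a suffix; earlier versions as <filename>vN.abs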
path = os.path.join(parent_path,
(f'{identifier.filename}.abs' if not version
else f'{identifier.filename}v{version}.abs'))
return self.parse_abs_file(filename=path) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extract_meta_from_remote(paper_id):\n # $ Query Paper\n paper = arxiv.query(id_list=[paper_id])[0]\n # $ Set the Arxiv Object to ensure Proper extraction\n return ArxivIdentity.from_arxiv_response(paper),paper",
"def get_abs(self, arxiv_id: str) -> DocMetadata:\n paper_id = Identifier(arxiv_id=arxiv_id)\n\n if paper_id.id in DELETED_PAPERS:\n raise AbsDeletedException(DELETED_PAPERS[paper_id.id])\n\n latest_version = self._get_version(identifier=paper_id)\n if not paper_id.has_version \\\n or paper_id.version == latest_version.version:\n return dataclasses.replace(latest_version,\n is_definitive=True,\n is_latest=True)\n\n try:\n this_version = self._get_version(identifier=paper_id,\n version=paper_id.version)\n except AbsNotFoundException as e:\n if paper_id.is_old_id:\n raise\n else:\n raise AbsVersionNotFoundException(e)\n\n # Several fields need to reflect the latest version's data\n combined_version: DocMetadata = dataclasses.replace(\n this_version,\n version_history=latest_version.version_history,\n categories=latest_version.categories,\n primary_category=latest_version.primary_category,\n secondary_categories=latest_version.secondary_categories,\n primary_archive=latest_version.primary_archive,\n primary_group=latest_version.primary_group,\n is_definitive=True,\n is_latest=False)\n\n return combined_version",
"def query_metadata(paper_id):\n dynamodb = boto3.client('dynamodb', region_name=MAIN_TABLE_ARN.region)\n key = {\n 'Partition': {\n 'S': 'metadata:%s' % paper_id\n }\n }\n response = dynamodb.get_item(\n TableName=MAIN_TABLE_ARN.resource,\n Key=key)\n return parse_metadata(response['Item'])",
"def get_paper_abstract(tree):\n\tpath = '//h2[text() = \"Abstract\"]/following-sibling::p/text()'\n\tabstract = tree.xpath(path)\n\t# If paper page contains the abstract, xpath returns a list with single string element\n\t# Access list to get the abstract string to return\n\tif abstract and abstract[0] != \"No abstract available.\":\n\t\tabstract = abstract[0]\n\t\n\treturn abstract",
"def GetMetadata(self):\n return self.dict['meta']",
"def get_metadata (self, name):\n return self.metadata.get(name)",
"def Abstract(self, default=None):\n tmp = self.data.get('metadata', {}).get('abstracts', [{}])[0]\n return tmp.get('value', default)",
"def getArticleMeta(docId):\n artMeta = None\n haveMedline = pubConf.mayResolveTextDir('medline')\n if haveMedline and not SKIPLOCALMEDLINE:\n artMeta = readLocalMedline(docId)\n if artMeta == None:\n artMeta = downloadPubmedMeta(docId)\n return artMeta",
"def get_metadata(self, docname, moderator):\n raise NotImplementedError()",
"def get_version():\n return about.get_version()",
"def get_abstract(doi):\n xml = download_article(doi)\n et = ET.fromstring(xml)\n coredata = et.find('article:coredata', elsevier_ns)\n abstract = coredata.find('dc:description', elsevier_ns)\n abs_text = abstract.text\n return abs_text",
"def get_meta(_id):\n dataset = ESDataset.get(id=_id, ignore=404, _source=\"_meta\")\n\n if dataset:\n return RegistryDocument.wraps(dataset).meta\n\n raise NoEntityError(f\"dataset {_id} does not exist.\")",
"def get_instance_metadata(version='latest', url='http://169.254.169.254'):\r\n return _get_instance_metadata('%s/%s/meta-data/' % (url, version))",
"def _getAllMeta(self):\n try:\n metadata = pyexiv2.ImageMetadata(self.imagePath)\n metadata.read()\n return metadata\n except:\n print 'error reading meta data'\n return None",
"def view_specific_paper_version():\n paper = db.paper(request.args(0))\n if paper is None:\n session.flash = T('No such paper')\n redirect(URL('default', 'index'))\n form = SQLFORM(db.paper, record=paper, readonly=True)\n all_versions_link = A('All versions', _href=URL('default', 'view_paper_versions', args=[paper.paper_id]))\n return dict(form=form,\n all_versions_link=all_versions_link)",
"def get_attribute_replmetadata_version(self, dn, att):\n\n res = self.search(expression=\"distinguishedName=%s\" % dn,\n scope=ldb.SCOPE_SUBTREE,\n controls=[\"search_options:1:2\"],\n attrs=[\"replPropertyMetaData\"])\n if len(res) == 0:\n return None\n\n repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob,\n res[0][\"replPropertyMetaData\"][0])\n ctr = repl.ctr\n if len(self.hash_oid_name.keys()) == 0:\n self._populate_oid_attid()\n for o in ctr.array:\n # Search for Description\n att_oid = self.get_oid_from_attid(o.attid)\n if att_oid in self.hash_oid_name and\\\n att.lower() == self.hash_oid_name[att_oid].lower():\n return o.version\n return None",
"def meta(self):\n return self.spec.meta",
"def meta(id):\n db = core.connect()\n return db[id][\"meta\"]",
"def _get_metadata(self) -> Metadata:\n manifest = self._get_manifest()\n\n return Metadata(**manifest[\"metadata\"])",
"def extract_metadata(parser_config, snippet):\n return parser_config['implementation'](snippet)",
"def get_meta(filename):\n with fiona.open(filename) as collection:\n return collection.meta",
"def get_version(file_data):\n for individual in file_data:\n if 'opam-version' in individual:\n version = individual.split('\"')\n return version[1]",
"def get_version(self, dataset_name=None):\n if dataset_name is None:\n return self._version\n else:\n # resolve dataset name\n dataset = self.__getitem__(dataset_name)\n try:\n # dataset can be either an HDF5 dataset or numpy.ndarray\n version = dataset.attrs.get(\"version\")\n except AttributeError:\n version = None\n if version is None:\n version = self._version\n if isinstance(version, bytes):\n return version.decode() # for python3\n return version",
"def get_oa_version(doi):\n # If DOI is a link, truncate it\n if \"dx.doi.org\" in doi:\n doi = doi[doi.find(\"dx.doi.org\") + 11:]\n r = requests.get(\"http://beta.dissem.in/api/%s\" % (doi,))\n oa_url = None\n if r.status_code == requests.codes.ok:\n result = r.json()\n if(\"status\" in result and\n \"paper\" in result and\n result[\"status\"] == \"ok\" and\n \"pdf_url\" in result[\"paper\"]):\n oa_url = result[\"paper\"][\"pdf_url\"]\n return oa_url",
"def get(entity, name=None, version=None, lineage=None):",
"def read_versionInfo(self):\n # PROTECTED REGION ID(SdpMasterLeafNode.versionInfo_read) ENABLED START #\n return self.attr_map[\"versionInfo\"]\n # PROTECTED REGION END # // SdpMasterLeafNode.versionInfo_read",
"def get_metadata_v3(session):\n LOG.debug(\"Exporting metadata for SFS augur build\")\n\n metadata = datastore.fetch_rows_from_table(session, (\"shipping\", \"metadata_for_augur_build_v3\"))\n\n return Response((row[0] + '\\n' for row in metadata), mimetype=\"application/x-ndjson\")",
"def _get_version(self):",
"def get_metadata():\n\n module = __name__.split('.', 1)\n\n pkg = pkg_resources.get_distribution(module[0])\n meta = {\n 'Name': None,\n 'Version': None,\n 'Summary': None,\n 'Home-page': None,\n 'Author': None,\n 'Author-email': None,\n 'License': None,\n }\n\n for line in pkg.get_metadata_lines(\"PKG-INFO\"):\n for par in meta:\n if line.startswith(par + \":\"):\n _, value = line.split(\": \", 1)\n meta[par] = value\n\n return meta",
"def get_version_info() -> Tuple[Text, Text]:"
] | [
"0.6080023",
"0.60320914",
"0.5905042",
"0.58196634",
"0.575648",
"0.5752106",
"0.56954294",
"0.5672822",
"0.56069934",
"0.5584644",
"0.54881644",
"0.54842895",
"0.54152155",
"0.5414221",
"0.5410572",
"0.54053754",
"0.5396079",
"0.53368646",
"0.5315691",
"0.52807045",
"0.5266874",
"0.5264593",
"0.52578527",
"0.52429247",
"0.52419627",
"0.5217576",
"0.52134717",
"0.5200225",
"0.51979434",
"0.5191412"
] | 0.6086258 | 0 |
Get the absolute parent path of the provided identifier. | def _get_parent_path(self, identifier: Identifier,
version: Optional[int] = None) -> str:
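        # Root differs: the latest-versions tree by default, the original-versions tree when a specific version is requested.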
parent_path = os.path.join(
(self.latest_versions_path if not version
else self.original_versions_path),
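            # Old-style identifiers with a known archive are filed under that archive; everything else goes under 'arxiv'.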
('arxiv' if not identifier.is_old_id or identifier.archive is None
else identifier.archive),
'papers',
identifier.yymm,
)
return parent_path | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_parent(self, id_) -> str:\n return list(self._nodes[id_]['parents'].keys())[0]",
"def parent_dir_path(path):\n return absolute_path(os.path.dirname(path))",
"def get_parent_id_from_trace_id():\n trace_id = get_trace_id()\n return trace_id.parent_id",
"def get_parent(path):\n\n # security check\n parent = os.path.dirname(path)\n\n try:\n get_abspath(parent)\n except:\n parent = ''\n\n return parent",
"def get_parent(self, the_id: str) -> str:\n\n parents = self.parent_types[the_id]\n return parents[1] if len(parents) > 1 else ''",
"def get_parentURI(self):\n # A container in CDMI has a '/' at the end but we don't (except for the\n # root)\n parent_path = self.resource.container\n if parent_path != \"/\":\n parent_path = \"{}\".format(parent_path)\n return \"{}\".format(parent_path)",
"def _parent_path(cls,path):\n # os.path.dirname(), but strip directories like files (like unix basename)\n # Treat directories like files...\n if path[-1]=='/':\n path=path[:-1]\n ret = os.path.dirname(path)\n return ret",
"def parent(self):\n if self._path == sep:\n return None\n elif self._parent is None:\n self._parent = Path(first(split(self._path)))\n return self._parent\n else:\n return self._parent",
"def get_parentURI(self):\n # A container in CDMI has a '/' at the end but we don't (except for the\n # root)\n parent_path = self.collection.container\n if parent_path not in ('/', 'null'):\n parent_path = \"{}\".format(parent_path)\n return \"{}\".format(parent_path)",
"def parent_dir(self):\n parent = os.path.dirname(self.dirn)\n if self.is_subdir:\n parent = os.path.basename(parent)\n else:\n if self.platform is not None and parent.endswith(self.platform):\n parent = parent[:-len(self.platform)].rstrip(os.sep)\n if self.year is not None and parent.endswith(str(year)):\n parent = parent[:-len(str(year))].rstrip(os.sep)\n return parent",
"def get_parent_dir(path):\n return os.path.dirname(path)",
"def parent_id(self) -> str:\n return self._db_data.parent_id",
"def get_parent_directory(src: str) -> str:\n return src[: src.rfind(os.path.sep)]",
"def get_parent_dir(path):\n\n return os.path.abspath(os.path.join(path, os.pardir))",
"def parent_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"parent_id\")",
"def parent_address(self):\n address = self.address\n if address.startswith(\"/\"):\n address = address[1:]\n if address.endswith(\"/\"):\n address = address[:-1]\n\n if \"/\" in address:\n # Return everything before the last / sign\n return address.rsplit(\"/\", 1)[0]\n elif address:\n return \"\"\n else:\n return None",
"def _parent_path(pkg, pkg_path):\n parent = pkg_path[: -len(pkg)] if pkg_path.endswith(pkg) else pkg_path\n return parent.rstrip(\"/\" + os.sep)",
"def get_parent_uid(klass, uid):\n if '.' not in uid:\n raise Exception(\"Can't get parent of id: {}\".format(uid))\n return '.'.join(uid.split('.')[1:])",
"def get_parent_uid(klass, uid):\n if '.' not in uid:\n raise Exception(\"Can't get parent of id: {}\".format(uid))\n return '.'.join(uid.split('.')[1:])",
"def get_parent_id(self):\n return self._parent_id",
"def parent_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"parent_id\")",
"def parent_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"parent_id\")",
"def path(self):\n if self.parent and self.parent.category_id:\n return self.parent.path + '/' + self.basename\n return self.basename",
"def context_parent_id(self) -> str | None:\n return bytes_to_ulid_or_none(self.context_parent_id_bin)",
"def get_path(path, parent=None, prj=None):\n if prj is None:\n prj = QgsProject.instance()\n\n if parent is None:\n parent = prj.layerTreeRoot()\n\n if path is None:\n return parent\n if not isinstance(path, (list, tuple)):\n path = path.split(\"/\")\n\n for part in path:\n if len(path) > 0:\n parent = get_group(part, parent)\n\n return parent",
"def parent_id(self):\n return self._parent_id",
"def parent_id(self):\n return self._parent_id",
"def parent(self, key):\n parent_key = '.'.join(key.split('.')[:-1])\n if not parent_key:\n return None\n else:\n return self[parent_key]",
"def getParentDirectory():\n path = os.path.dirname(os.path.realpath(__file__))\n path = '/'.join( path.split('/')[:-1] )\n return path",
"def parentOrThisDir(path):\n if not os.path.isdir(path):\n path = os.path.dirname(path)\n return path"
] | [
"0.74234164",
"0.7001471",
"0.698503",
"0.69271344",
"0.69162315",
"0.6713711",
"0.6697498",
"0.66670144",
"0.66431475",
"0.66178703",
"0.6597941",
"0.6594848",
"0.6549766",
"0.6548474",
"0.6538243",
"0.6464383",
"0.64623785",
"0.64418644",
"0.64418644",
"0.63819927",
"0.63514864",
"0.63514864",
"0.63245034",
"0.62740463",
"0.62679803",
"0.6258298",
"0.6258298",
"0.6245833",
"0.62396634",
"0.6237572"
] | 0.7567351 | 0 |
Parse the version entries from the arXiv .abs file. | def _parse_version_entries(arxiv_id: str, version_entry_list: List) \
-> Tuple[int, List[VersionEntry], str]:
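        # Each remaining prehistory line describes one submitted version, in order of submission.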
version_count = 0
version_entries = list()
for parsed_version_entry in version_entry_list:
version_count += 1
date_match = RE_DATE_COMPONENTS.match(parsed_version_entry)
if not date_match:
raise AbsParsingException(
'Could not extract date components from date line.')
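            # A matched line carries the submission date plus the source type code and size in kilobytes.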
try:
sd = date_match.group('date')
submitted_date = parser.parse(date_match.group('date'))
except (ValueError, TypeError):
raise AbsParsingException(
f'Could not parse submitted date {sd} as datetime')
source_type = SourceType(code=date_match.group('source_type'))
ve = VersionEntry(
raw=date_match.group(0),
source_type=source_type,
size_kilobytes=int(date_match.group('size_kilobytes')),
submitted_date=submitted_date,
version=version_count
)
version_entries.append(ve)
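        # The returned versioned id string reflects the last (highest-numbered) entry parsed.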
return (
version_count,
version_entries,
f"{arxiv_id}v"
f"{version_entries[-1].version}") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_abs_file(filename: str) -> DocMetadata:\n try:\n with open(filename, mode='r', encoding='latin-1') as absf:\n raw = absf.read()\n except FileNotFoundError:\n raise AbsNotFoundException\n except UnicodeDecodeError as e:\n # TODO: log this\n raise AbsParsingException(\n f'Failed to decode .abs file \"{filename}\": {e}')\n\n # TODO: clean up\n modified = datetime.fromtimestamp(\n os.path.getmtime(filename), tz=gettz('US/Eastern'))\n modified = modified.astimezone(tz=tzutc())\n\n # there are two main components to an .abs file that contain data,\n # but the split must always return four components\n components = RE_ABS_COMPONENTS.split(raw)\n if len(components) > 4:\n components = alt_component_split(components)\n if not len(components) == 4:\n raise AbsParsingException(\n 'Unexpected number of components parsed from .abs.')\n\n # everything else is in the second main component\n prehistory, misc_fields = re.split(r'\\n\\n', components[1])\n\n fields: Dict[str, Any] = \\\n AbsMetaSession._parse_metadata_fields(key_value_block=misc_fields)\n\n # abstract is the first main component\n fields['abstract'] = components[2]\n\n id_match = RE_ARXIV_ID_FROM_PREHISTORY.match(prehistory)\n\n if not id_match:\n raise AbsParsingException(\n 'Could not extract arXiv ID from prehistory component.')\n\n arxiv_id = id_match.group('arxiv_id')\n\n prehistory = re.sub(r'^.*\\n', '', prehistory)\n parsed_version_entries = re.split(r'\\n', prehistory)\n\n # submitter data\n from_match = RE_FROM_FIELD.match(parsed_version_entries.pop(0))\n if not from_match:\n raise AbsParsingException('Could not extract submitter data.')\n name = from_match.group('name')\n if name is not None:\n name = name.rstrip()\n email = from_match.group('email')\n\n # get the version history for this particular version of the document\n if not len(parsed_version_entries) >= 1:\n raise AbsParsingException('At least one version entry expected.')\n\n (version, version_history, arxiv_id_v) \\\n = AbsMetaSession._parse_version_entries(\n arxiv_id=arxiv_id,\n version_entry_list=parsed_version_entries)\n\n arxiv_identifier = Identifier(arxiv_id=arxiv_id)\n\n # named (key-value) fields\n if not all(rf in fields for rf in REQUIRED_FIELDS):\n raise AbsParsingException(f'missing required field(s)')\n\n # some transformations\n category_list: List[str] = []\n primary_category = None\n\n if 'categories' in fields and fields['categories']:\n category_list = fields['categories'].split()\n if category_list[0] in taxonomy.CATEGORIES:\n primary_category = Category(category_list[0])\n primary_archive = \\\n Archive(\n taxonomy.CATEGORIES[primary_category.id]['in_archive'])\n elif arxiv_identifier.is_old_id:\n primary_archive = Archive(arxiv_identifier.archive)\n elif arxiv_identifier.is_old_id:\n primary_archive = Archive(arxiv_identifier.archive)\n else:\n raise AbsException('Cannot infer archive from identifier.')\n\n doc_license: License = \\\n License() if 'license' not in fields else License(\n recorded_uri=fields['license'])\n raw_safe = re.sub(RE_FROM_FIELD, r'\\g<from>\\g<name>', raw, 1)\n\n return DocMetadata(\n raw_safe=raw_safe,\n arxiv_id=arxiv_id,\n arxiv_id_v=arxiv_id_v,\n arxiv_identifier=Identifier(arxiv_id=arxiv_id),\n title=fields['title'],\n abstract=fields['abstract'],\n authors=AuthorList(fields['authors']),\n submitter=Submitter(name=name, email=email),\n categories=fields['categories'] if 'categories' in fields else None,\n primary_category=primary_category,\n primary_archive=primary_archive,\n primary_group=Group(\n 
taxonomy.ARCHIVES[primary_archive.id]['in_group']),\n secondary_categories=[\n Category(x) for x in category_list[1:]\n if (category_list and len(category_list) > 1)\n ],\n journal_ref=None if 'journal_ref' not in fields\n else fields['journal_ref'],\n report_num=None if 'report_num' not in fields\n else fields['report_num'],\n doi=None if 'doi' not in fields else fields['doi'],\n acm_class=None if 'acm_class' not in fields else\n fields['acm_class'],\n msc_class=None if 'msc_class' not in fields else\n fields['msc_class'],\n proxy=None if 'proxy' not in fields else fields['proxy'],\n comments=fields['comments'] if 'comments' in fields else None,\n version=version,\n license=doc_license,\n version_history=version_history,\n modified=modified\n # private=private # TODO, not implemented\n )",
"def test_parse_version(self):\n self.assertEqual(\n _parse_sw_version('BaiStation_V100R001C00B110SPC003'),\n [100, 1, 0, 110, 3],\n )\n self.assertEqual(\n _parse_sw_version('BaiStation_V100R001C00B060SPC012'),\n [100, 1, 0, 60, 12],\n )\n self.assertEqual(\n _parse_sw_version('BaiStation_V100R001C00B060SPC012_FB_3'),\n [100, 1, 0, 60, 12],\n )\n # Incorrect number of digits\n self.assertEqual(\n _parse_sw_version('BaiStation_V10R001C00B060SPC012'),\n None,\n )\n self.assertEqual(\n _parse_sw_version('XYZ123'),\n None,\n )\n self.assertEqual(\n _parse_sw_version(''),\n None,\n )",
"def load_version_information() -> None:\n to_update = {\"VERSION_MAJOR\", \"VERSION_MINOR\", \"VERSION_PATCH\", \"VERSION_SUFFIX\"}\n with VERSION_FILE.open(\"r\", encoding=\"utf-8\") as fp:\n for line in fp:\n name, _, value = line.strip().partition(\"=\")\n # Don't overwrite random variables by trusting an external file.\n var = name.strip()\n if var in to_update:\n globals()[var] = value.strip()",
"def test_parse_version():\n version = VersionUtils.parse_version('9.5.3')\n assert version == VersionInfo(9, 5, 3)\n\n # Test #.# style versions\n v10_2 = VersionUtils.parse_version('10.2')\n assert v10_2 == VersionInfo(10, 2, 0)\n\n v11 = VersionUtils.parse_version('11')\n assert v11 == VersionInfo(11, 0, 0)\n\n # Test #beta# style versions\n beta11 = VersionUtils.parse_version('11beta3')\n assert beta11 == VersionInfo(11, 0, 0, prerelease='beta.3')\n\n assert v10_2 < beta11\n assert v11 > beta11\n\n # Test #rc# style versions\n version = VersionUtils.parse_version('11rc1')\n assert version == VersionInfo(11, 0, 0, prerelease='rc.1')\n\n # Test #nightly# style versions\n version = VersionUtils.parse_version('11nightly3')\n assert version == VersionInfo(11, 0, 0, 'nightly.3')\n\n v12_3_tde = VersionUtils.parse_version('12.3_TDE_1.0')\n assert v12_3_tde == VersionInfo(12, 3, 0)",
"def iddversiontuple(afile):\n\n def versiontuple(vers):\n \"\"\"version tuple\"\"\"\n return tuple([int(num) for num in vers.split(\".\")])\n\n try:\n fhandle = open(afile, \"rb\")\n except TypeError:\n fhandle = afile\n line1 = fhandle.readline()\n try:\n line1 = line1.decode(\"ISO-8859-2\")\n except AttributeError:\n pass\n line = line1.strip()\n if line1 == \"\":\n return (0,)\n vers = line.split()[-1]\n return versiontuple(vers)",
"def extract_version_info():\n version = None\n if os.path.exists('.version'):\n with open('.version') as f:\n line = f.read().rstrip()\n log.info('.version contains \"%s\"', line)\n if line.startswith('openafs-'):\n # Extract version from the git tag name.\n version = re.sub('openafs-[^-]*-', '', line).replace('_', '.')\n elif line.startswith('BP-'):\n # Branch point tags do not contain the version number.\n log.info('.version file has old branch point tag name.')\n else:\n # Use the given version string.\n version = line\n if not version:\n # Unable to lookup version from the .version file, try to extract the\n # version from the source directory name.\n root = os.path.basename(os.path.abspath('.'))\n m = re.match(r'openafs-(.*)', root)\n if m:\n version = m.group(1)\n if not version:\n module.fail_json(msg='Unable to determine version.')\n\n # Determine package version and release from the OpenAFS version.\n m1 = re.match(r'(.*)(pre[0-9]+)', version) # prerelease\n m2 = re.match(r'(.*)dev', version) # development\n m3 = re.match(r'(.*)-([0-9]+)-(g[a-f0-9]+)$', version) # development\n m4 = re.match(r'(.*)-([a-z]+)([0-9]+)', version) # custom\n if m1:\n v = m1.group(1)\n r = \"0.{0}\".format(m1.group(2))\n elif m2:\n v = m2.group(1)\n r = \"0.dev\"\n elif m3:\n v = m3.group(1)\n r = \"{0}.{1}\".format(m3.group(2), m3.group(3))\n elif m4:\n v = m4.group(1).replace('-', '')\n r = \"1.2.{0}.{1}\".format(m4.group(3), m4.group(2))\n else:\n v = version # standard release\n r = \"1\" # increment when repackaging this version\n # '-' are used as delimiters by rpm.\n v = v.replace('-', '_')\n r = r.replace('-', '_')\n return dict(openafs_version=version, package_version=v, package_release=r)",
"def test_new_style_with_version(self):\n self.assertIsNotNone(parse_arxiv_id('1202.1234v1'))\n self.assertIsNotNone(parse_arxiv_id('1203.12345v1'))\n self.assertIsNotNone(parse_arxiv_id('1203.12345v12'))",
"def parse_version(version):\n return [int(num) for num in version.split('.')]",
"def read_abinit(filename='abinit.in'):\n\n from ase import Atoms, units\n\n if isinstance(filename, str):\n f = open(filename)\n else: # Assume it's a file-like object\n f = filename\n\n lines = f.readlines()\n if type(filename) == str:\n f.close()\n\n full_file = ''\n for line in lines:\n if '#' in line:\n meat, comment = line.split('#')\n else:\n meat = line\n full_file = full_file + meat + ' '\n\n full_file.strip()\n tokens = full_file.lower().split()\n\n # note that the file can not be scanned sequentially\n\n index = tokens.index(\"acell\")\n unit = 1.0\n if(tokens[index+4].lower()[:3] != 'ang'):\n unit = units.Bohr\n acell = [unit*float(tokens[index+1]),\n unit*float(tokens[index+2]),\n unit*float(tokens[index+3])]\n\n index = tokens.index(\"natom\")\n natom = int(tokens[index+1])\n\n index = tokens.index(\"ntypat\")\n ntypat = int(tokens[index+1])\n\n index = tokens.index(\"typat\")\n typat = []\n for i in range(natom):\n typat.append(int(tokens[index+1+i]))\n\n index = tokens.index(\"znucl\")\n znucl = []\n for i in range(ntypat):\n znucl.append(int(tokens[index+1+i]))\n\n index = tokens.index(\"rprim\")\n rprim = []\n for i in range(3):\n rprim.append([acell[i]*float(tokens[index+3*i+1]),\n acell[i]*float(tokens[index+3*i+2]),\n acell[i]*float(tokens[index+3*i+3])])\n\n # create a list with the atomic numbers\n numbers = []\n for i in range(natom):\n ii = typat[i] - 1\n numbers.append(znucl[ii])\n\n # now the positions of the atoms\n if \"xred\" in tokens:\n index = tokens.index(\"xred\")\n xred = []\n for i in range(natom):\n xred.append([float(tokens[index+3*i+1]),\n float(tokens[index+3*i+2]),\n float(tokens[index+3*i+3])])\n atoms = Atoms(cell=rprim, scaled_positions=xred, numbers=numbers,\n pbc=True)\n else:\n if \"xcart\" in tokens:\n index = tokens.index(\"xcart\")\n unit = units.Bohr\n elif \"xangst\" in tokens:\n unit = 1.0\n index = tokens.index(\"xangst\")\n else:\n raise IOError(\n \"No xred, xcart, or xangs keyword in abinit input file\")\n\n xangs = []\n for i in range(natom):\n xangs.append([unit*float(tokens[index+3*i+1]),\n unit*float(tokens[index+3*i+2]),\n unit*float(tokens[index+3*i+3])])\n atoms = Atoms(cell=rprim, positions=xangs, numbers=numbers, pbc=True)\n \n try:\n i = tokens.index('nsppol')\n except ValueError:\n nsppol = None\n else:\n nsppol = int(tokens[i + 1])\n\n if nsppol == 2:\n index = tokens.index('spinat')\n magmoms = [float(tokens[index + 3 * i + 3]) for i in range(natom)]\n atoms.set_initial_magnetic_moments(magmoms)\n\n return atoms",
"def read_inversion_info(file_dic):\n #print_file_test = open('file_test.txt','w')\n\n if not ( check_inversion_files(file_dic) ):\n print 'error(read_inversion_info): problem with lenstool file names'\n return 0\n \n file_generate_arcs = file_dic['file_generate_arcs']\n info_input_lens = fc.extract_second_identifiers( file_generate_arcs, \\\n 'potential' )\n#-------------------------------------------------------------------------------\n\n file_source = file_dic['file_source']\n info_src = np.loadtxt(file_source, unpack=False)\n if len(info_src) == 8 and np.isscalar(info_src[0]):\n #FIXME - check if the second condition is all we need\n info_src = [info_src]\n#-------------------------------------------------------------------------------\n\n file_make_inversion = file_dic['file_make_inversion']\n info_fited_param = fc.extract_second_identifiers( file_make_inversion, \\\n 'limit' )\n info_forme = fc.extract_parameter(file_make_inversion, 'forme')[0][0]\n\n#-------------------------------------------------------------------------------\n\n file_best_fit = file_dic['file_best_fit']\n info_best_fit = fc.extract_second_identifiers( file_best_fit, \\\n 'potentiel' )\n\n info_xi2 = fc.extract_parameter(file_best_fit, '#Chi2pos:')\n\n#-------------------------------------------------------------------------------\n file_chires = file_dic['file_chires']\n\n info_chires = extract_parameter(file_chires, '0')\n rmss_mean = [0.0, 0.0]\n rmsi_mean = [0.0, 0.0]\n for i in info_chires:\n if i[0] != 'A':\n rmss_mean[0] = rmss_mean[0] + float(i[7])\n rmss_mean[1] = rmss_mean[1] + 1.0\n \n rmsi_mean[0] = rmsi_mean[0] + float(i[8])\n rmsi_mean[1] = rmsi_mean[1] + 1.0\n\n rmss_mean = rmss_mean[0]/rmss_mean[1]\n rmsi_mean = rmsi_mean[0]/rmsi_mean[1]\n#-------------------------------------------------------------------------------\n out_dict = { 'xi2' : float(info_xi2[0][0]), \\\n 'best_fit_lens' : info_best_fit, \\\n 'rmsi_mean' : rmsi_mean, \\\n 'rmss_mean' : rmss_mean, \\\n 'fited_parameters' : info_fited_param[0].keys(), \\\n 'input_lens' : info_input_lens[len(info_input_lens) - 1], \\\n 'forme' : info_forme \\\n }\n #for i in out_dict.keys():\n # print i, out_dict[i]\n return out_dict",
"def _parse_ach_file(self, contents):\n file_length = len(contents)\n\n for index in range(0, file_length, self.LINE_LENGTH):\n line = contents[index:index + self.LINE_LENGTH]\n\n if line.startswith('1'):\n self._read_header(line)\n elif line.startswith('5'):\n self._read_batch_header(line)\n elif line.startswith('6'):\n self._read_entry_detail(line)\n elif line.startswith('7'):\n self._read_addenda_record(line)\n elif line.startswith('8'):\n self._read_batch_control_record(line)\n elif line.startswith('9'):\n if line == '9' * 94:\n continue\n self._read_file_control_record(line)",
"def sample_vcf():\n file_content = b\"\"\"##fileformat=VCFv4.2\n##hailversion=0.2.100-2ea2615a797a\n##INFO=<ID=QUALapprox,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=SB,Number=.,Type=Integer,Description=\"\">\n##INFO=<ID=MQ,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=MQRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=VarDP,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_ReadPosRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_pab_max,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_QD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_MQ,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=QD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_MQRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=FS,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_FS,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=ReadPosRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_QUALapprox,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_SB_TABLE,Number=.,Type=Integer,Description=\"\">\n##INFO=<ID=AS_VarDP,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_SOR,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=SOR,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=singleton,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=transmitted_singleton,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=omni,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=mills,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=monoallelic,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=AS_VQSLOD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=InbreedingCoeff,Number=1,Type=Float,Description=\"\">\n##FILTER=<ID=AC0,Description=\"Allele count is zero after filtering out low-confidence genotypes (GQ < 20; DP < 10; and AB < 0.2 for het calls)\">\n##FILTER=<ID=AS_VQSR,Description=\"Failed VQSR filtering thresholds of -2.7739 for SNPs and -1.0606 for indels\">\n##contig=<ID=chr1,length=248956422,assembly=GRCh38>\n#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\nchr1\t10330\t.\tCCCCTAACCCTAACCCTAACCCTACCCTAACCCTAACCCTAACCCTAACCCTAA\tC\t.\tPASS\tQUALapprox=21493;SB=325,1077,113,694;MQ=32.1327;MQRankSum=0.720000;VarDP=2236;AS_ReadPosRankSum=-0.736000;AS_pab_max=1.00000;AS_QD=5.17857;AS_MQ=29.5449;QD=9.61225;AS_MQRankSum=0.00000;FS=8.55065;AS_FS=.;ReadPosRankSum=0.727000;AS_QUALapprox=145;AS_SB_TABLE=325,1077,2,5;AS_VarDP=28;AS_SOR=0.311749;SOR=1.48100;singleton;AS_VQSLOD=13.4641;InbreedingCoeff=-0.000517845\"\"\"\n file = io.BytesIO(file_content)\n return file",
"def split_ver(v):\n return [int(x) for x in v.split('.')]",
"def loadVersionMap():\n\tlines = readVersionList(cfg.FILE_VERSION)\n\tver_map = {}\n\tval = []\n\tflag = False\n\n\tfor line in lines:\n\t\tline = line.strip()\n\t\tif line.startswith(cfg.FLAG_TIPS):\n\t\t\tline_list = line.split()\n\t\t\tlen_row = len(line_list)\n\t\t\ta_DmnNum = {}\n\t\t\tDOMAIN = cfg.FLAG_NULL\n\n\t\t\tfor i in range(0,len_row):\n\t\t\t\tDOMAIN = line_list[i]\n\t\t\t\ta_DmnNum[DOMAIN] = i\n\t\t\tval = line_list\n\t\telif line.startswith(cfg.OPEN_BRACKET):\n\t\t\tleft = line.find(cfg.OPEN_BRACKET)\n\t\t\tright = line.find(cfg.CLOSE_BRACKET)\n\t\t\tName = line[left+1:right].strip()\n\t\t\tver_map[Name] = []\n\t\t\tver_map[Name].append(val[1:])\n\t\telif not line:\n\t\t\tcontinue\n\t\telse:\n\t\t\tline_list = line.split()\n\t\t\tver_map[Name].append(line_list)\n\tsortVersion(ver_map)\n\treturn ver_map",
"def parse_file(self):\n for num, line in enumerate(self._text):\n if \"CRYSTAL STRUCTURE SOLUTION\" in line:\n line = line.strip().strip('+').strip()\n if 'SHELXTL' in line:\n self.version = 'SHELXT ' + line.split()[-1]\n if line.strip().startswith('R1 Rweak Alpha'):\n for n in range(100):\n if not self._text[num + 1 + n]:\n break\n if self._text[num + 1]:\n self.solutions[self._text[num + 1 + n][58:76].strip()] = self._text[num + 1 + n][37:51].strip()",
"def get_version_info() -> Tuple[Text, Text]:",
"def _init_obo_version(self, line):\n if line[0:14] == \"format-version\":\n self.format_version = line[16:-1]\n if line[0:12] == \"data-version\":\n self.data_version = line[14:-1]",
"def parse_version(header, data):\n log = unpack('<I', data)\n game, save = unpack('<7sxf', header)\n if save == -1:\n save = unpack('<I', header)\n if save == 37:\n save = 37.0\n else:\n save /= (1<<16)\n version = get_version(game.decode('ascii'), round(save, 2), log)\n return version, game.decode('ascii'), round(save, 2), log",
"def test_version(self, parse_input):\n bb = parse_input(\"name testname\\nversion 1.0\")\n assert bb.version == \"1.0\"\n\n bb = parse_input(\"name testname\\nversion 1.12\")\n assert bb.version == \"1.12\"",
"def extract(self, inputfile, line):\n\n # Extract the version number and optionally the version\n # control info.\n if any(version_trigger in line for version_trigger in (\"Q-Chem\", \"Unrecognized platform\", \"Version\")):\n # Part 1 matches\n # - `Q-Chem 4.3.0 for Intel X86 EM64T Linux`\n # Part 2 matches\n # - `Unrecognized platform!!! 4.0.0.1`\n # Part 3 matches\n # - `Intel X86 EM64T Linux Version 4.1.0.1 `\n # but not\n # - `Additional authors for Version 3.1:`\n # - `Q-Chem, Version 4.1, Q-Chem, Inc., Pittsburgh, PA (2013).`\n match = re.search(\n r\"Q-Chem\\s([\\d\\.]*)\\sfor|\"\n r\"Unrecognized platform!!!\\s([\\d\\.]*)\\b|\"\n r\"Version\\s([\\d\\.]*)\\s*$\",\n line\n )\n if match:\n groups = [s for s in match.groups() if s is not None]\n assert len(groups) == 1\n package_version = groups[0]\n self.metadata[\"package_version\"] = package_version\n self.metadata[\"legacy_package_version\"] = package_version\n self.set_attribute(\"parsed_svn_revision\", False)\n # Avoid \"Last SVN revision\" entry.\n if \"SVN revision\" in line and \"Last\" not in line:\n svn_revision = line.split()[3]\n line = next(inputfile)\n svn_branch = line.split()[3].replace(\"/\", \"_\")\n if (\n \"package_version\" in self.metadata\n and hasattr(self, \"parsed_svn_revision\")\n and not self.parsed_svn_revision\n ):\n self.metadata[\n \"package_version\"\n ] = f\"{self.metadata['package_version']}dev+{svn_branch}-{svn_revision}\"\n parsed_version = parse_version(self.metadata[\"package_version\"])\n assert isinstance(parsed_version, Version)\n self.set_attribute(\"package_version\", parsed_version)\n self.set_attribute(\"parsed_svn_revision\", True)\n\n # Disable/enable parsing for fragment sections.\n if any(message in line for message in self.fragment_section_headers):\n self.is_fragment_section = True\n if any(message in line for message in self.supersystem_section_headers):\n self.is_fragment_section = False\n\n if not self.is_fragment_section:\n\n # If the input section is repeated back, parse the $rem and\n # $molecule sections.\n if line[0:11] == 'User input:':\n self.user_input = dict()\n self.skip_line(inputfile, 'd')\n while list(set(line.strip())) != ['-']:\n\n if line.strip().lower() == '$rem':\n\n self.user_input['rem'] = dict()\n\n while line.strip().lower() != '$end':\n\n line = next(inputfile).lower()\n if line.strip() == '$end':\n break\n # Apparently calculations can run without\n # a matching $end...this terminates the\n # user input section no matter what.\n if line.strip() == ('-' * 62):\n break\n\n tokens = line.split()\n # Allow blank lines.\n if len(tokens) == 0:\n continue\n # Entries may be separated by an equals\n # sign, and can have comments, for example:\n # ecp gen\n # ecp = gen\n # ecp gen ! 
only on first chlorine\n # ecp = gen only on first chlorine\n assert len(tokens) >= 2\n keyword = tokens[0]\n if tokens[1] == '=':\n option = tokens[2]\n else:\n option = tokens[1]\n self.user_input['rem'][keyword] = option\n\n if keyword == 'method':\n method = option.upper()\n if method in self.wfn_method:\n self.metadata[\"methods\"].append(method)\n else:\n self.metadata[\"methods\"].append('DFT')\n self.metadata[\"functional\"] = method\n\n if keyword == 'exchange':\n self.metadata[\"methods\"].append('DFT')\n self.metadata[\"functional\"] = option\n\n if keyword == 'print_orbitals':\n # Stay with the default value if a number isn't\n # specified.\n if option in ('true', 'false'):\n continue\n else:\n norbdisp_aonames = int(option)\n self.norbdisp_alpha_aonames = norbdisp_aonames\n self.norbdisp_beta_aonames = norbdisp_aonames\n\n if line.strip().lower() == '$ecp':\n\n self.user_input['ecp'] = []\n line = next(inputfile)\n\n while line.strip().lower() != '$end':\n\n while list(set(line.strip())) != ['*']:\n\n # Parse the element for this ECP\n # entry. If only the element is on\n # this line, or the 2nd token is 0, it\n # applies to all atoms; if it's > 0,\n # then it indexes (1-based) that\n # specific atom in the whole molecule.\n tokens = line.split()\n assert len(tokens) > 0\n element = tokens[0][0].upper() + tokens[0][1:].lower()\n assert element in self.table.element\n if len(tokens) > 1:\n assert len(tokens) == 2\n index = int(tokens[1]) - 1\n else:\n index = -1\n line = next(inputfile)\n\n # Next comes the ECP definition. If\n # the line contains only a single\n # item, it's a built-in ECP, otherwise\n # it's a full definition.\n tokens = line.split()\n if len(tokens) == 1:\n ncore = 0\n line = next(inputfile)\n else:\n assert len(tokens) == 3\n ncore = int(tokens[2])\n # Don't parse the remainder of the\n # ECP definition.\n while list(set(line.strip())) != ['*']:\n line = next(inputfile)\n\n entry = (element, index, ncore)\n self.user_input['ecp'].append(entry)\n\n line = next(inputfile)\n\n if line.strip().lower() == '$end':\n break\n\n if line.strip().lower() == '$molecule':\n\n self.user_input['molecule'] = dict()\n line = next(inputfile)\n\n # Don't read the molecule, only the\n # supersystem charge and multiplicity.\n if line.split()[0].lower() == 'read':\n pass\n else:\n charge, mult = [int(x) for x in line.split()]\n self.user_input['molecule']['charge'] = charge\n self.user_input['molecule']['mult'] = mult\n\n # Parsing of general sections.\n if line.startswith(\"$\") and line.strip() != \"$end\":\n section_name = line.strip()[1:].lower()\n section = list()\n line = next(inputfile)\n while line.strip().lower() != \"$end\":\n section.append(line)\n line = next(inputfile)\n self.user_input[section_name] = section\n\n line = next(inputfile).lower()\n\n # Point group symmetry.\n if 'Molecular Point Group' in line:\n point_group_full = line.split()[3].lower()\n self.metadata['symmetry_detected'] = point_group_full\n line = next(inputfile)\n if 'Largest Abelian Subgroup' in line:\n point_group_abelian = line.split()[3].lower()\n self.metadata['symmetry_used'] = point_group_abelian\n\n # Parse the basis set name\n if 'Requested basis set' in line:\n self.metadata[\"basis_set\"] = line.split()[-1]\n\n # Parse the general basis for `gbasis`, in the style used by\n # Gaussian.\n if 'Basis set in general basis input format:' in line:\n self.skip_lines(inputfile, ['d', '$basis'])\n line = next(inputfile)\n if not hasattr(self, 'gbasis'):\n self.gbasis = []\n # The end of the 
general basis block.\n while '$end' not in line:\n atom = []\n # 1. Contains element symbol and atomic index of\n # basis functions; if 0, applies to all atoms of\n # same element.\n assert len(line.split()) == 2\n line = next(inputfile)\n # The end of each atomic block.\n while '****' not in line:\n # 2. Contains the type of basis function {S, SP,\n # P, D, F, G, H, ...}, the number of primitives,\n # and the weight of the final contracted function.\n bfsplitline = line.split()\n assert len(bfsplitline) == 3\n bftype = bfsplitline[0]\n nprim = int(bfsplitline[1])\n line = next(inputfile)\n # 3. The primitive basis functions that compose\n # the contracted basis function; there are `nprim`\n # of them. The first value is the exponent, and\n # the second value is the contraction\n # coefficient. If `bftype == 'SP'`, the primitives\n # are for both S- and P-type basis functions but\n # with separate contraction coefficients,\n # resulting in three columns.\n if bftype == 'SP':\n primitives_S = []\n primitives_P = []\n else:\n primitives = []\n # For each primitive in the contracted basis\n # function...\n for iprim in range(nprim):\n primsplitline = line.split()\n exponent = float(primsplitline[0])\n if bftype == 'SP':\n assert len(primsplitline) == 3\n coefficient_S = float(primsplitline[1])\n coefficient_P = float(primsplitline[2])\n primitives_S.append((exponent, coefficient_S))\n primitives_P.append((exponent, coefficient_P))\n else:\n assert len(primsplitline) == 2\n coefficient = float(primsplitline[1])\n primitives.append((exponent, coefficient))\n line = next(inputfile)\n if bftype == 'SP':\n bf_S = ('S', primitives_S)\n bf_P = ('P', primitives_P)\n atom.append(bf_S)\n atom.append(bf_P)\n else:\n bf = (bftype, primitives)\n atom.append(bf)\n # Move to the next contracted basis function\n # as long as we don't hit the '****' atom\n # delimiter.\n self.gbasis.append(atom)\n line = next(inputfile)\n\n if line.strip() == 'The following effective core potentials will be applied':\n\n # Keep track of all elements that may have an ECP on\n # them. *Which* centers have an ECP can't be\n # determined here, so just take the number of valence\n # electrons, then later later figure out the centers\n # and do core = Z - valence.\n self.possible_ecps = dict()\n # This will fail if an element has more than one kind\n # of ECP.\n\n split_fixed = utils.WidthSplitter((4, 13, 20, 2, 14, 14))\n\n self.skip_lines(inputfile, ['d', 'header', 'header', 'd'])\n line = next(inputfile)\n while list(set(line.strip())) != ['-']:\n tokens = split_fixed.split(line)\n if tokens[0] != '':\n element = tokens[0]\n valence = int(tokens[1])\n ncore = self.table.number[element] - valence\n self.possible_ecps[element] = ncore\n line = next(inputfile)\n\n # Solvation via SMD. 
This usually appears twice, both times\n # before knowing which underlying PCM is used, so we can't say\n # anything about the complete model yet.\n if line.strip() == \"Citation of the SMD model:\":\n self.solvent_model_is_smd = True\n\n if \"solvent model\" in line:\n groups = self.re_solvent_model.search(line).groups()\n solvent_model = groups[0]\n if solvent_model == \"C-PCM\":\n feps_form = groups[1]\n if feps_form == \"(eps-1)/(eps+0.5)\":\n solvent_model = \"CPCM-COSMO\"\n elif feps_form == \"(eps-1)/eps\":\n solvent_model = \"CPCM\"\n else:\n self.logger.warning(\n \"Cannot parse this form of f(eps) for PCM, assume CPCM: %s\", feps_form\n )\n elif solvent_model == \"IEF-PCM\":\n solvent_model = \"IEFPCM\"\n elif solvent_model == \"SS(V)PE\":\n solvent_model = \"SS(V)PE\"\n else:\n self.logger.warning(\n \"Unknown PCM-based solvent model, setting it as-is: %s\", solvent_model\n )\n if self.solvent_model_is_smd:\n solvent_model = f\"SMD-{solvent_model}\"\n self.metadata[\"solvent_model\"] = solvent_model\n\n # Aside from f(eps) for some models, older versions don't\n # print the dielectric in the output, only in the echoed\n # input.\n if \"solvent\" in self.user_input:\n for input_line in self.user_input[\"solvent\"]:\n tokens = input_line.split()\n key = tokens[0].lower()\n if key == \"dielectric\":\n # TODO obviate the need for this idiom\n if \"solvent_params\" not in self.metadata:\n self.metadata[\"solvent_params\"] = dict()\n self.metadata[\"solvent_params\"][\"epsilon\"] = float(tokens[1])\n elif key == \"opticaldielectric\":\n if \"solvent_params\" not in self.metadata:\n self.metadata[\"solvent_params\"] = dict()\n self.metadata[\"solvent_params\"][\"epsilon_infinite\"] = float(tokens[1])\n\n if line.strip() == \"==== cosmo data ====\":\n self.metadata[\"solvent_model\"] = \"COSMO\"\n while line.strip() != \"=== end cosmo data ===\":\n line = next(inputfile)\n if line.startswith(\"eps\"):\n if \"solvent_params\" not in self.metadata:\n self.metadata[\"solvent_params\"] = dict()\n self.metadata[\"solvent_params\"][\"epsilon\"] = float(line.split()[2])\n\n if 'TIME STEP #' in line:\n tokens = line.split()\n self.append_attribute('time', float(tokens[8]))\n\n if line.strip() == \"Adding empirical dispersion correction\":\n while \"energy\" not in line:\n line = next(inputfile)\n self.append_attribute(\n \"dispersionenergies\",\n utils.convertor(utils.float(line.split()[-2]), \"hartree\", \"eV\")\n )\n\n # Extract the atomic numbers and coordinates of the atoms.\n if 'Standard Nuclear Orientation' in line:\n if \"Angstroms\" in line:\n convertor = lambda x: x\n elif 'Bohr' in line:\n convertor = lambda x: utils.convertor(x, 'bohr', 'Angstrom')\n else:\n raise ValueError(f\"Unknown units in coordinate header: {line}\")\n self.skip_lines(inputfile, ['cols', 'dashes'])\n atomelements = []\n atomcoords = []\n line = next(inputfile)\n while list(set(line.strip())) != ['-']:\n entry = line.split()\n atomelements.append(entry[1])\n atomcoords.append([convertor(float(value)) for value in entry[2:]])\n line = next(inputfile)\n\n self.append_attribute('atomcoords', atomcoords)\n\n # We calculate and handle atomnos no matter what, since in\n # the case of fragment calculations the atoms may change,\n # along with the charge and spin multiplicity.\n self.atomnos = []\n self.atomelements = []\n for atomelement in atomelements:\n self.atomelements.append(atomelement)\n if atomelement == 'GH':\n self.atomnos.append(0)\n else:\n self.atomnos.append(self.table.number[atomelement])\n self.natom 
= len(self.atomnos)\n self.atommap = self.generate_atom_map()\n self.formula_histogram = self.generate_formula_histogram()\n\n # Number of electrons.\n # Useful for determining the number of occupied/virtual orbitals.\n if 'Nuclear Repulsion Energy' in line:\n line = next(inputfile)\n nelec_re_string = r'There are(\\s+[0-9]+) alpha and(\\s+[0-9]+) beta electrons'\n match = re.findall(nelec_re_string, line.strip())\n self.set_attribute('nalpha', int(match[0][0].strip()))\n self.set_attribute('nbeta', int(match[0][1].strip()))\n self.norbdisp_alpha += self.nalpha\n self.norbdisp_alpha_aonames += self.nalpha\n self.norbdisp_beta += self.nbeta\n self.norbdisp_beta_aonames += self.nbeta\n # Calculate the spin multiplicity (2S + 1), where S is the\n # total spin of the system.\n S = (self.nalpha - self.nbeta) / 2\n mult = int(2 * S + 1)\n self.set_attribute('mult', mult)\n # Calculate the molecular charge as the difference between\n # the atomic numbers and the number of electrons.\n if hasattr(self, 'atomnos'):\n charge = sum(self.atomnos) - (self.nalpha + self.nbeta)\n self.set_attribute('charge', charge)\n\n # Number of basis functions.\n if 'basis functions' in line:\n if not hasattr(self, 'nbasis'):\n self.set_attribute('nbasis', int(line.split()[-3]))\n # In the case that there are fewer basis functions\n # (and therefore MOs) than default number of MOs\n # displayed, reset the display values.\n self.norbdisp_alpha = min(self.norbdisp_alpha, self.nbasis)\n self.norbdisp_alpha_aonames = min(self.norbdisp_alpha_aonames, self.nbasis)\n self.norbdisp_beta = min(self.norbdisp_beta, self.nbasis)\n self.norbdisp_beta_aonames = min(self.norbdisp_beta_aonames, self.nbasis)\n\n # Finally, versions of Q-Chem greater than 5.1.2 print all MOs in\n # the \"Final <Spin> MO Coefficients\" blocks, but *not* the\n # \"MOLECULAR ORBITAL COEFFICIENTS\" blocks.\n if hasattr(self, \"package_version\"):\n pv = self.package_version\n if pv.major >= 5 and pv.minor > 1:\n norbdisp = None\n if hasattr(self, \"nmo\"):\n norbdisp = self.nmo\n elif hasattr(self, \"nbasis\"):\n norbdisp = self.nbasis\n if norbdisp is not None:\n self.norbdisp_alpha = norbdisp\n self.norbdisp_beta = norbdisp\n\n if \"Using C-PCM dielectric factor\" in line:\n # If not using a named solvent, the (static) dielectric factor\n # epsilon has to be parsed from f(eps) here. 
Don't use\n # \"C-PCM\" as the name of the model, since the proper name will\n # appear later (C-PCM, IEF-PCM, SS(V)PE, ...).\n tokens = line.split()\n assert len(tokens) == 9\n feps_form = tokens[6]\n feps = float(tokens[8])\n eps = None\n if feps_form == \"(eps-1)/(eps+0.5)\":\n eps = ((-0.5 * feps) - 1) / (feps - 1)\n elif feps_form == \"(eps-1)/eps\":\n eps = 1.0 / (1.0 - feps)\n else:\n self.logger.warning(\"Cannot parse this form of f(eps) for PCM: %s\", feps_form)\n if eps is not None:\n if \"solvent_params\" not in self.metadata:\n self.metadata[\"solvent_params\"] = dict()\n self.metadata[\"solvent_params\"][\"epsilon\"] = eps\n\n # Check for whether or not we're peforming an\n # (un)restricted calculation.\n if 'calculation will be' in line:\n if ' restricted' in line:\n self.unrestricted = False\n if 'unrestricted' in line:\n self.unrestricted = True\n if hasattr(self, 'nalpha') and hasattr(self, 'nbeta'):\n if self.nalpha != self.nbeta:\n self.unrestricted = True\n self.is_rohf = True\n\n # Section with SCF iterations goes like this:\n #\n # SCF converges when DIIS error is below 1.0E-05\n # ---------------------------------------\n # Cycle Energy DIIS Error\n # ---------------------------------------\n # 1 -381.9238072190 1.39E-01\n # 2 -382.2937212775 3.10E-03\n # 3 -382.2939780242 3.37E-03\n # ...\n #\n scf_success_messages = (\n 'Convergence criterion met',\n 'corrected energy'\n )\n scf_failure_messages = (\n 'SCF failed to converge',\n 'Convergence failure'\n )\n if 'SCF converges when ' in line:\n if not hasattr(self, 'scftargets'):\n self.scftargets = []\n target = float(line.split()[-1])\n self.scftargets.append([target])\n\n # We should have the header between dashes now,\n # but sometimes there are lines before the first dashes.\n while not 'Cycle Energy' in line:\n line = next(inputfile)\n self.skip_line(inputfile, 'd')\n\n values = []\n iter_counter = 1\n line = next(inputfile)\n while not any(message in line for message in scf_success_messages):\n\n # Some trickery to avoid a lot of printing that can occur\n # between each SCF iteration.\n entry = line.split()\n if len(entry) > 0:\n if entry[0] == str(iter_counter):\n # Q-Chem only outputs one error metric.\n error = float(entry[2])\n values.append([error])\n iter_counter += 1\n\n try:\n line = next(inputfile)\n # Is this the end of the file for some reason?\n except StopIteration:\n self.logger.warning(\n f\"File terminated before end of last SCF! 
Last error: {error}\"\n )\n break\n\n # We've converged, but still need the last iteration.\n if any(message in line for message in scf_success_messages):\n entry = line.split()\n error = float(entry[2])\n values.append([error])\n iter_counter += 1\n\n # This is printed in regression QChem4.2/dvb_sp_unconverged.out\n # so use it to bail out when convergence fails.\n if any(message in line for message in scf_failure_messages):\n break\n\n if not hasattr(self, 'scfvalues'):\n self.scfvalues = []\n self.scfvalues.append(numpy.array(values))\n\n # Molecular orbital coefficients.\n\n # Try parsing them from this block (which comes from\n # `scf_final_print = 2``) rather than the combined\n # aonames/mocoeffs/moenergies block (which comes from\n # `print_orbitals = true`).\n if 'Final Alpha MO Coefficients' in line:\n if not hasattr(self, 'mocoeffs'):\n self.mocoeffs = []\n mocoeffs = QChem.parse_matrix(inputfile, self.nbasis, self.norbdisp_alpha, self.ncolsblock)\n self.mocoeffs.append(mocoeffs.transpose())\n\n if 'Final Beta MO Coefficients' in line:\n mocoeffs = QChem.parse_matrix(inputfile, self.nbasis, self.norbdisp_beta, self.ncolsblock)\n self.mocoeffs.append(mocoeffs.transpose())\n\n if 'Total energy in the final basis set' in line:\n if not hasattr(self, 'scfenergies'):\n self.scfenergies = []\n scfenergy = float(line.split()[-1])\n self.scfenergies.append(utils.convertor(scfenergy, 'hartree', 'eV'))\n\n # Geometry optimization.\n\n if 'Maximum Tolerance Cnvgd?' in line:\n line_g = next(inputfile).split()[1:3]\n line_d = next(inputfile).split()[1:3]\n line_e = next(inputfile).split()[2:4]\n\n if not hasattr(self, 'geotargets'):\n self.geotargets = [line_g[1], line_d[1], utils.float(line_e[1])]\n if not hasattr(self, 'geovalues'):\n self.geovalues = []\n maxg = utils.float(line_g[0])\n maxd = utils.float(line_d[0])\n ediff = utils.float(line_e[0])\n geovalues = [maxg, maxd, ediff]\n self.geovalues.append(geovalues)\n\n if '** OPTIMIZATION CONVERGED **' in line:\n if not hasattr(self, 'optdone'):\n self.optdone = []\n self.optdone.append(len(self.atomcoords))\n\n if '** MAXIMUM OPTIMIZATION CYCLES REACHED **' in line:\n if not hasattr(self, 'optdone'):\n self.optdone = []\n\n # Moller-Plesset corrections.\n\n # There are multiple modules in Q-Chem for calculating MPn energies:\n # cdman, ccman, and ccman2, all with different output.\n #\n # MP2, RI-MP2, and local MP2 all default to cdman, which has a simple\n # block of output after the regular SCF iterations.\n #\n # MP3 is handled by ccman2.\n #\n # MP4 and variants are handled by ccman.\n\n # This is the MP2/cdman case.\n if 'MP2 total energy' in line:\n if not hasattr(self, 'mpenergies'):\n self.mpenergies = []\n mp2energy = float(line.split()[4])\n mp2energy = utils.convertor(mp2energy, 'hartree', 'eV')\n self.mpenergies.append([mp2energy])\n\n # This is the MP3/ccman2 case.\n if line[1:11] == 'MP2 energy' and line[12:19] != 'read as':\n if not hasattr(self, 'mpenergies'):\n self.mpenergies = []\n mpenergies = []\n mp2energy = float(line.split()[3])\n mpenergies.append(mp2energy)\n line = next(inputfile)\n line = next(inputfile)\n # Just a safe check.\n if 'MP3 energy' in line:\n mp3energy = float(line.split()[3])\n mpenergies.append(mp3energy)\n mpenergies = [utils.convertor(mpe, 'hartree', 'eV')\n for mpe in mpenergies]\n self.mpenergies.append(mpenergies)\n\n # This is the MP4/ccman case.\n if 'EHF' in line:\n if not hasattr(self, 'mpenergies'):\n self.mpenergies = []\n mpenergies = []\n\n while list(set(line.strip())) != 
['-']:\n\n if 'EMP2' in line:\n mp2energy = float(line.split()[2])\n mpenergies.append(mp2energy)\n if 'EMP3' in line:\n mp3energy = float(line.split()[2])\n mpenergies.append(mp3energy)\n if 'EMP4SDQ' in line:\n mp4sdqenergy = float(line.split()[2])\n mpenergies.append(mp4sdqenergy)\n # This is really MP4SD(T)Q.\n if 'EMP4 ' in line:\n mp4sdtqenergy = float(line.split()[2])\n mpenergies.append(mp4sdtqenergy)\n\n line = next(inputfile)\n\n mpenergies = [utils.convertor(mpe, 'hartree', 'eV')\n for mpe in mpenergies]\n self.mpenergies.append(mpenergies)\n\n # Coupled cluster corrections.\n # Hopefully we only have to deal with ccman2 here.\n\n if 'CCD total energy' in line:\n if not hasattr(self, 'ccenergies'):\n self.ccenergies = []\n ccdenergy = float(line.split()[-1])\n ccdenergy = utils.convertor(ccdenergy, 'hartree', 'eV')\n self.ccenergies.append(ccdenergy)\n if 'CCSD total energy' in line:\n has_triples = False\n if not hasattr(self, 'ccenergies'):\n self.ccenergies = []\n ccsdenergy = float(line.split()[-1])\n # Make sure we aren't actually doing CCSD(T).\n line = next(inputfile)\n line = next(inputfile)\n if 'CCSD(T) total energy' in line:\n has_triples = True\n ccsdtenergy = float(line.split()[-1])\n ccsdtenergy = utils.convertor(ccsdtenergy, 'hartree', 'eV')\n self.ccenergies.append(ccsdtenergy)\n if not has_triples:\n ccsdenergy = utils.convertor(ccsdenergy, 'hartree', 'eV')\n self.ccenergies.append(ccsdenergy)\n\n if line[:11] == \" CCSD T1^2\":\n t1_squared = float(line.split()[3])\n t1_norm = math.sqrt(t1_squared)\n self.metadata[\"t1_diagnostic\"] = t1_norm / math.sqrt(2 * (self.nalpha + self.nbeta))\n\n # Electronic transitions. Works for both CIS and TDDFT.\n if 'Excitation Energies' in line:\n\n # Restricted:\n # ---------------------------------------------------\n # TDDFT/TDA Excitation Energies\n # ---------------------------------------------------\n #\n # Excited state 1: excitation energy (eV) = 3.6052\n # Total energy for state 1: -382.167872200685\n # Multiplicity: Triplet\n # Trans. Mom.: 0.0000 X 0.0000 Y 0.0000 Z\n # Strength : 0.0000\n # D( 33) --> V( 3) amplitude = 0.2618\n # D( 34) --> V( 2) amplitude = 0.2125\n # D( 35) --> V( 1) amplitude = 0.9266\n #\n # Unrestricted:\n # Excited state 2: excitation energy (eV) = 2.3156\n # Total energy for state 2: -381.980177630969\n # <S**2> : 0.7674\n # Trans. 
Mom.: -2.7680 X -0.1089 Y 0.0000 Z\n # Strength : 0.4353\n # S( 1) --> V( 1) amplitude = -0.3105 alpha\n # D( 34) --> S( 1) amplitude = 0.9322 beta\n\n self.skip_lines(inputfile, ['dashes', 'blank'])\n line = next(inputfile)\n\n etenergies = []\n etsyms = []\n etoscs = []\n etsecs = []\n spinmap = {'alpha': 0, 'beta': 1}\n\n while list(set(line.strip())) != ['-']:\n\n # Take the total energy for the state and subtract from the\n # ground state energy, rather than just the EE;\n # this will be more accurate.\n if 'Total energy for state' in line:\n energy = utils.convertor(float(line.split()[5]), 'hartree', 'wavenumber')\n etenergy = energy - utils.convertor(self.scfenergies[-1], 'eV', 'wavenumber')\n etenergies.append(etenergy)\n # if 'excitation energy' in line:\n # etenergy = utils.convertor(float(line.split()[-1]), 'eV', 'wavenumber')\n # etenergies.append(etenergy)\n if 'Multiplicity' in line:\n etsym = line.split()[1]\n etsyms.append(etsym)\n if 'Strength' in line:\n strength = float(line.split()[-1])\n etoscs.append(strength)\n\n # This is the list of transitions.\n if 'amplitude' in line:\n sec = []\n while line.strip() != '':\n re_match = self.re_tddft.search(line)\n if self.unrestricted:\n spin = spinmap[re_match.group(7)]\n else:\n spin = 0\n\n # There is a subtle difference between TDA and RPA calcs,\n # because in the latter case each transition line is\n # preceeded by the type of vector: X or Y, name excitation\n # or deexcitation (see #154 for details). For deexcitations,\n # we will need to reverse the MO indices. Note also that Q-Chem\n # starts reindexing virtual orbitals at 1.\n if line[5] == '(':\n ttype = 'X'\n else:\n assert line[5] == \":\"\n ttype = line[4]\n\n # get start and end indices of contribution\n # as the numbers written in parentheses:\n index_pattern = re.compile(r\"\\(( *\\d+)\\)\")\n indices=index_pattern.findall(line)\n #assert len(indices)==2 # there must always be a 'start' and 'end' index.\n\n if self.unrestricted:\n # Here are three different countings: \n # The 'D'oubly occupied orbitals,\n # the 'S'ingly occupied (i.e. index > self.nbeta) and\n # the 'V'irtual orbitals (index > self.nalpha)\n # from or to which the excitation can go:\n\n # this is supposed to be the standard case:\n n_minor=self.nbeta\n n_major=self.nalpha\n # but this also can appear \n if self.nbeta > self.nalpha:\n n_minor=self.nalpha\n n_major=self.nbeta\n\n # split 'line' by '(' to get three strings due to double occurence of '('.\n # From the first and second string (i.e. 
before the parentheses), take the last character.\n if re_match.group(1) == \"D\":\n startidx = int(indices[0]) - 1\n elif re_match.group(1) == \"S\":\n startidx = int(indices[0]) - 1 + n_minor\n assert startidx < n_major\n else:\n startidx=-15\n assert \"invalid from_occ\"\n\n if re_match.group(3) == \"S\":\n endidx = int(indices[1]) - 1 + n_minor\n assert endidx < n_major\n elif re_match.group(3) == \"V\":\n endidx = int(indices[1]) - 1 + n_major\n else:\n assert \"invalid to_occ\"\n\n else:\n startidx = int(re_match.group(2)) - 1\n endidx = int(re_match.group(4)) - 1 + self.nalpha\n\n contrib = float(re_match.group(5))\n\n start = (startidx, spin)\n end = (endidx, spin)\n if ttype == 'X':\n sec.append([start, end, contrib])\n elif ttype == 'Y':\n sec.append([end, start, contrib])\n else:\n raise ValueError(f\"Unknown transition type: {ttype}\")\n line = next(inputfile)\n etsecs.append(sec)\n\n line = next(inputfile)\n\n self.set_attribute('etenergies', etenergies)\n self.set_attribute('etsyms', etsyms)\n self.set_attribute('etoscs', etoscs)\n self.set_attribute('etsecs', etsecs)\n\n # Static and dynamic polarizability from mopropman.\n if 'Polarizability (a.u.)' in line:\n if not hasattr(self, 'polarizabilities'):\n self.polarizabilities = []\n while 'Full Tensor' not in line:\n line = next(inputfile)\n self.skip_line(inputfile, 'blank')\n polarizability = [next(inputfile).split() for _ in range(3)]\n self.polarizabilities.append(numpy.array(polarizability))\n\n # Static polarizability from finite difference or\n # responseman.\n if line.strip() in ('Static polarizability tensor [a.u.]',\n 'Polarizability tensor [a.u.]'):\n if not hasattr(self, 'polarizabilities'):\n self.polarizabilities = []\n polarizability = [next(inputfile).split() for _ in range(3)]\n self.polarizabilities.append(numpy.array(polarizability))\n\n # Molecular orbital energies and symmetries.\n if line.strip() == 'Orbital Energies (a.u.) and Symmetries':\n\n # --------------------------------------------------------------\n # Orbital Energies (a.u.) 
and Symmetries\n # --------------------------------------------------------------\n #\n # Alpha MOs, Restricted\n # -- Occupied --\n # -10.018 -10.018 -10.008 -10.008 -10.007 -10.007 -10.006 -10.005\n # 1 Bu 1 Ag 2 Bu 2 Ag 3 Bu 3 Ag 4 Bu 4 Ag\n # -9.992 -9.992 -0.818 -0.755 -0.721 -0.704 -0.670 -0.585\n # 5 Ag 5 Bu 6 Ag 6 Bu 7 Ag 7 Bu 8 Bu 8 Ag\n # -0.561 -0.532 -0.512 -0.462 -0.439 -0.410 -0.400 -0.397\n # 9 Ag 9 Bu 10 Ag 11 Ag 10 Bu 11 Bu 12 Bu 12 Ag\n # -0.376 -0.358 -0.349 -0.330 -0.305 -0.295 -0.281 -0.263\n # 13 Bu 14 Bu 13 Ag 1 Au 15 Bu 14 Ag 15 Ag 1 Bg\n # -0.216 -0.198 -0.160\n # 2 Au 2 Bg 3 Bg\n # -- Virtual --\n # 0.050 0.091 0.116 0.181 0.280 0.319 0.330 0.365\n # 3 Au 4 Au 4 Bg 5 Au 5 Bg 16 Ag 16 Bu 17 Bu\n # 0.370 0.413 0.416 0.422 0.446 0.469 0.496 0.539\n # 17 Ag 18 Bu 18 Ag 19 Bu 19 Ag 20 Bu 20 Ag 21 Ag\n # 0.571 0.587 0.610 0.627 0.646 0.693 0.743 0.806\n # 21 Bu 22 Ag 22 Bu 23 Bu 23 Ag 24 Ag 24 Bu 25 Ag\n # 0.816\n # 25 Bu\n #\n # Beta MOs, Restricted\n # -- Occupied --\n # -10.018 -10.018 -10.008 -10.008 -10.007 -10.007 -10.006 -10.005\n # 1 Bu 1 Ag 2 Bu 2 Ag 3 Bu 3 Ag 4 Bu 4 Ag\n # -9.992 -9.992 -0.818 -0.755 -0.721 -0.704 -0.670 -0.585\n # 5 Ag 5 Bu 6 Ag 6 Bu 7 Ag 7 Bu 8 Bu 8 Ag\n # -0.561 -0.532 -0.512 -0.462 -0.439 -0.410 -0.400 -0.397\n # 9 Ag 9 Bu 10 Ag 11 Ag 10 Bu 11 Bu 12 Bu 12 Ag\n # -0.376 -0.358 -0.349 -0.330 -0.305 -0.295 -0.281 -0.263\n # 13 Bu 14 Bu 13 Ag 1 Au 15 Bu 14 Ag 15 Ag 1 Bg\n # -0.216 -0.198 -0.160\n # 2 Au 2 Bg 3 Bg\n # -- Virtual --\n # 0.050 0.091 0.116 0.181 0.280 0.319 0.330 0.365\n # 3 Au 4 Au 4 Bg 5 Au 5 Bg 16 Ag 16 Bu 17 Bu\n # 0.370 0.413 0.416 0.422 0.446 0.469 0.496 0.539\n # 17 Ag 18 Bu 18 Ag 19 Bu 19 Ag 20 Bu 20 Ag 21 Ag\n # 0.571 0.587 0.610 0.627 0.646 0.693 0.743 0.806\n # 21 Bu 22 Ag 22 Bu 23 Bu 23 Ag 24 Ag 24 Bu 25 Ag\n # 0.816\n # 25 Bu\n # --------------------------------------------------------------\n\n self.skip_line(inputfile, 'dashes')\n line = next(inputfile)\n energies_alpha, symbols_alpha, homo_alpha = self.parse_orbital_energies_and_symmetries(inputfile)\n # Only look at the second block if doing an unrestricted calculation.\n # This might be a problem for ROHF/ROKS.\n if self.unrestricted:\n energies_beta, symbols_beta, homo_beta = self.parse_orbital_energies_and_symmetries(inputfile)\n\n # For now, only keep the last set of MO energies, even though it is\n # printed at every step of geometry optimizations and fragment jobs.\n self.set_attribute('moenergies', [numpy.array(energies_alpha)])\n self.set_attribute('homos', [homo_alpha])\n self.set_attribute('mosyms', [symbols_alpha])\n if self.unrestricted:\n self.moenergies.append(numpy.array(energies_beta))\n self.homos.append(homo_beta)\n self.mosyms.append(symbols_beta)\n\n self.set_attribute('nmo', len(self.moenergies[0]))\n\n # Molecular orbital energies, no symmetries.\n if line.strip() == 'Orbital Energies (a.u.)':\n\n # In the case of no orbital symmetries, the beta spin block is not\n # present for restricted calculations.\n\n # --------------------------------------------------------------\n # Orbital Energies (a.u.)\n # --------------------------------------------------------------\n #\n # Alpha MOs\n # -- Occupied --\n # ******* -38.595 -34.580 -34.579 -34.578 -19.372 -19.372 -19.364\n # -19.363 -19.362 -19.362 -4.738 -3.252 -3.250 -3.250 -1.379\n # -1.371 -1.369 -1.365 -1.364 -1.362 -0.859 -0.855 -0.849\n # -0.846 -0.840 -0.836 -0.810 -0.759 -0.732 -0.729 -0.704\n # -0.701 -0.621 -0.610 -0.595 -0.587 -0.584 -0.578 -0.411\n # -0.403 -0.355 -0.354 -0.352\n # 
-- Virtual --\n # -0.201 -0.117 -0.099 -0.086 0.020 0.031 0.055 0.067\n # 0.075 0.082 0.086 0.092 0.096 0.105 0.114 0.148\n #\n # Beta MOs\n # -- Occupied --\n # ******* -38.561 -34.550 -34.549 -34.549 -19.375 -19.375 -19.367\n # -19.367 -19.365 -19.365 -4.605 -3.105 -3.103 -3.102 -1.385\n # -1.376 -1.376 -1.371 -1.370 -1.368 -0.863 -0.858 -0.853\n # -0.849 -0.843 -0.839 -0.818 -0.765 -0.738 -0.737 -0.706\n # -0.702 -0.624 -0.613 -0.600 -0.591 -0.588 -0.585 -0.291\n # -0.291 -0.288 -0.275\n # -- Virtual --\n # -0.139 -0.122 -0.103 0.003 0.014 0.049 0.049 0.059\n # 0.061 0.070 0.076 0.081 0.086 0.090 0.098 0.106\n # 0.138\n # --------------------------------------------------------------\n\n self.skip_line(inputfile, 'dashes')\n line = next(inputfile)\n energies_alpha, _, homo_alpha = self.parse_orbital_energies_and_symmetries(inputfile)\n # Only look at the second block if doing an unrestricted calculation.\n # This might be a problem for ROHF/ROKS.\n if self.unrestricted:\n energies_beta, _, homo_beta = self.parse_orbital_energies_and_symmetries(inputfile)\n\n # For now, only keep the last set of MO energies, even though it is\n # printed at every step of geometry optimizations and fragment jobs.\n self.set_attribute('moenergies', [numpy.array(energies_alpha)])\n self.set_attribute('homos', [homo_alpha])\n if self.unrestricted:\n self.moenergies.append(numpy.array(energies_beta))\n self.homos.append(homo_beta)\n\n self.set_attribute('nmo', len(self.moenergies[0]))\n\n # Molecular orbital coefficients.\n\n # This block comes from `print_orbitals = true/{int}`. Less\n # precision than `scf_final_print >= 2` for `mocoeffs`, but\n # important for `aonames` and `atombasis`.\n\n if any(header in line\n for header in self.alpha_mo_coefficient_headers):\n\n # If we've asked to display more virtual orbitals than\n # there are MOs present in the molecule, fix that now.\n if hasattr(self, 'nmo') and hasattr(self, 'nalpha') and hasattr(self, 'nbeta'):\n self.norbdisp_alpha_aonames = min(self.norbdisp_alpha_aonames, self.nmo)\n self.norbdisp_beta_aonames = min(self.norbdisp_beta_aonames, self.nmo)\n\n if not hasattr(self, 'mocoeffs'):\n self.mocoeffs = []\n if not hasattr(self, 'atombasis'):\n self.atombasis = []\n for n in range(self.natom):\n self.atombasis.append([])\n if not hasattr(self, 'aonames'):\n self.aonames = []\n # We could also attempt to parse `moenergies` here, but\n # nothing is gained by it.\n\n mocoeffs = self.parse_matrix_aonames(inputfile, self.nbasis, self.norbdisp_alpha_aonames)\n # Only use these MO coefficients if we don't have them\n # from `scf_final_print`.\n if len(self.mocoeffs) == 0:\n self.mocoeffs.append(mocoeffs.transpose())\n\n # Go back through `aonames` to create `atombasis`.\n assert len(self.aonames) == self.nbasis\n for aoindex, aoname in enumerate(self.aonames):\n atomindex = int(self.re_atomindex.search(aoname).groups()[0]) - 1\n self.atombasis[atomindex].append(aoindex)\n assert len(self.atombasis) == len(self.atomnos)\n\n if 'BETA MOLECULAR ORBITAL COEFFICIENTS' in line:\n\n mocoeffs = self.parse_matrix_aonames(inputfile, self.nbasis, self.norbdisp_beta_aonames)\n if len(self.mocoeffs) == 1:\n self.mocoeffs.append(mocoeffs.transpose())\n\n # Population analysis.\n\n if 'Ground-State Mulliken Net Atomic Charges' in line:\n self.parse_charge_section(inputfile, 'mulliken')\n if 'Hirshfeld Atomic Charges' in line:\n self.parse_charge_section(inputfile, 'hirshfeld')\n if 'Charge Model 5' in line:\n self.parse_charge_section(inputfile, 'cm5')\n if 'Ground-State 
ChElPG Net Atomic Charges' in line:\n self.parse_charge_section(inputfile, 'chelpg')\n if 'Merz-Kollman ESP Net Atomic Charges' in line:\n self.parse_charge_section(inputfile, 'esp')\n if 'Merz-Kollman RESP Net Atomic Charges' in line:\n self.parse_charge_section(inputfile, 'resp')\n\n # Multipole moments are not printed in lexicographical order,\n # so we need to parse and sort them. The units seem OK, but there\n # is some uncertainty about the reference point and whether it\n # can be changed.\n #\n # Notice how the letter/coordinate labels change to coordinate ranks\n # after hexadecapole moments, and need to be translated. Additionally,\n # after 9-th order moments the ranks are not necessarily single digits\n # and so there are spaces between them.\n #\n # -----------------------------------------------------------------\n # Cartesian Multipole Moments\n # LMN = < X^L Y^M Z^N >\n # -----------------------------------------------------------------\n # Charge (ESU x 10^10)\n # 0.0000\n # Dipole Moment (Debye)\n # X 0.0000 Y 0.0000 Z 0.0000\n # Tot 0.0000\n # Quadrupole Moments (Debye-Ang)\n # XX -50.9647 XY -0.1100 YY -50.1441\n # XZ 0.0000 YZ 0.0000 ZZ -58.5742\n # ...\n # 5th-Order Moments (Debye-Ang^4)\n # 500 0.0159 410 -0.0010 320 0.0005\n # 230 0.0000 140 0.0005 050 0.0012\n # ...\n # -----------------------------------------------------------------\n #\n if \"Cartesian Multipole Moments\" in line:\n\n # This line appears not by default, but only when\n # `multipole_order` > 4:\n line = next(inputfile)\n if 'LMN = < X^L Y^M Z^N >' in line:\n line = next(inputfile)\n\n # The reference point is always the origin, although normally the molecule\n # is moved so that the center of charge is at the origin.\n self.reference = [0.0, 0.0, 0.0]\n self.moments = [self.reference]\n\n # Watch out! This charge is in statcoulombs without the exponent!\n # We should expect very good agreement, however Q-Chem prints\n # the charge only with 5 digits, so expect 1e-4 accuracy.\n charge_header = next(inputfile)\n assert charge_header.split()[0] == \"Charge\"\n charge = float(next(inputfile).strip())\n charge = utils.convertor(charge, 'statcoulomb', 'e') * 1e-10\n # Allow this to change until fragment jobs are properly implemented.\n # assert abs(charge - self.charge) < 1e-4\n\n # This will make sure Debyes are used (not sure if it can be changed).\n line = next(inputfile)\n assert line.strip() == \"Dipole Moment (Debye)\"\n\n while \"-----\" not in line:\n\n # The current multipole element will be gathered here.\n multipole = []\n\n line = next(inputfile)\n while (\"-----\" not in line) and (\"Moment\" not in line):\n\n cols = line.split()\n\n # The total (norm) is printed for dipole but not other multipoles.\n if cols[0] == 'Tot':\n line = next(inputfile)\n continue\n\n # Find and replace any 'stars' with NaN before moving on.\n for i in range(len(cols)):\n if '***' in cols[i]:\n cols[i] = numpy.nan\n\n # The moments come in pairs (label followed by value) up to the 9-th order,\n # although above hexadecapoles the labels are digits representing the rank\n # in each coordinate. 
Above the 9-th order, ranks are not always single digits,\n # so there are spaces between them, which means moments come in quartets.\n if len(self.moments) < 5:\n for i in range(len(cols)//2):\n lbl = cols[2*i]\n m = cols[2*i + 1]\n multipole.append([lbl, m])\n elif len(self.moments) < 10:\n for i in range(len(cols)//2):\n lbl = cols[2*i]\n lbl = 'X'*int(lbl[0]) + 'Y'*int(lbl[1]) + 'Z'*int(lbl[2])\n m = cols[2*i + 1]\n multipole.append([lbl, m])\n else:\n for i in range(len(cols)//4):\n lbl = 'X'*int(cols[4*i]) + 'Y'*int(cols[4*i + 1]) + 'Z'*int(cols[4*i + 2])\n m = cols[4*i + 3]\n multipole.append([lbl, m])\n\n line = next(inputfile)\n\n # Sort should use the first element when sorting lists,\n # so this should simply work, and afterwards we just need\n # to extract the second element in each list (the actual moment).\n multipole.sort()\n multipole = [m[1] for m in multipole]\n self.moments.append(multipole)\n\n # For `method = force` or geometry optimizations,\n # the gradient is printed.\n if any(header in line for header in self.gradient_headers):\n if not hasattr(self, 'grads'):\n self.grads = []\n if 'SCF' in line:\n ncolsblock = self.ncolsblock\n else:\n ncolsblock = 5\n grad = QChem.parse_matrix(inputfile, 3, self.natom, ncolsblock)\n self.grads.append(grad.T)\n\n # (Static) polarizability from frequency calculations.\n if 'Polarizability Matrix (a.u.)' in line:\n if not hasattr(self, 'polarizabilities'):\n self.polarizabilities = []\n polarizability = []\n self.skip_line(inputfile, 'index header')\n for _ in range(3):\n line = next(inputfile)\n ss = line.strip()[1:]\n polarizability.append([ss[0:12], ss[13:24], ss[25:36]])\n # For some reason the sign is inverted.\n self.polarizabilities.append(-numpy.array(polarizability, dtype=float))\n\n # For IR-related jobs, the Hessian is printed (dim: 3*natom, 3*natom).\n # Note that this is *not* the mass-weighted Hessian.\n if any(header in line for header in self.hessian_headers):\n dim = 3*self.natom\n self.set_attribute(\n \"hessian\", QChem.parse_matrix(inputfile, dim, dim, self.ncolsblock)\n )\n\n # Start of the IR/Raman frequency section.\n if 'VIBRATIONAL ANALYSIS' in line:\n\n vibfreqs = []\n vibfconsts = []\n vibrmasses = []\n vibirs = []\n vibramans = []\n vibdisps = []\n\n while 'STANDARD THERMODYNAMIC QUANTITIES' not in line:\n ## IR, optional Raman:\n #\n # **********************************************************************\n # ** **\n # ** VIBRATIONAL ANALYSIS **\n # ** -------------------- **\n # ** **\n # ** VIBRATIONAL FREQUENCIES (CM**-1) AND NORMAL MODES **\n # ** FORCE CONSTANTS (mDYN/ANGSTROM) AND REDUCED MASSES (AMU) **\n # ** INFRARED INTENSITIES (KM/MOL) **\n ##** RAMAN SCATTERING ACTIVITIES (A**4/AMU) AND DEPOLARIZATION RATIOS **\n # ** **\n # **********************************************************************\n #\n #\n # Mode: 1 2 3\n # Frequency: -106.88 -102.91 161.77\n # Force Cnst: 0.0185 0.0178 0.0380\n # Red. 
Mass: 2.7502 2.8542 2.4660\n # IR Active: NO YES YES\n # IR Intens: 0.000 0.000 0.419\n # Raman Active: YES NO NO\n ##Raman Intens: 2.048 0.000 0.000\n ##Depolar: 0.750 0.000 0.000\n # X Y Z X Y Z X Y Z\n # C 0.000 0.000 -0.100 -0.000 0.000 -0.070 -0.000 -0.000 -0.027\n # C 0.000 0.000 0.045 -0.000 0.000 -0.074 0.000 -0.000 -0.109\n # C 0.000 0.000 0.148 -0.000 -0.000 -0.074 0.000 0.000 -0.121\n # (...)\n # H -0.000 -0.000 0.422 -0.000 -0.000 0.499 0.000 0.000 -0.285\n # TransDip 0.000 -0.000 -0.000 0.000 -0.000 -0.000 -0.000 0.000 0.021\n #\n # Mode: 4 5 6\n # ...\n #\n # There isn't any symmetry information for normal modes present\n # in Q-Chem.\n # if not hasattr(self, 'vibsyms'):\n # self.vibsyms = []\n if 'Frequency:' in line:\n vibfreqs.extend(map(float, line.split()[1:]))\n\n if 'Force Cnst:' in line:\n vibfconsts.extend(map(float, line.split()[2:]))\n\n if 'Red. Mass' in line:\n vibrmasses.extend(map(float, line.split()[2:]))\n\n if 'IR Intens:' in line:\n vibirs.extend(map(float, line.split()[2:]))\n\n if 'Raman Intens:' in line:\n vibramans.extend(map(float, line.split()[2:]))\n\n # This is the start of the displacement block.\n if line.split()[0:3] == ['X', 'Y', 'Z']:\n disps = []\n for k in range(self.natom):\n line = next(inputfile)\n numbers = list(map(float, line.split()[1:]))\n N = len(numbers) // 3\n if not disps:\n for n in range(N):\n disps.append([])\n for n in range(N):\n disps[n].append(numbers[3*n:(3*n)+3])\n vibdisps.extend(disps)\n\n line = next(inputfile)\n\n # Anharmonic vibrational analysis.\n # Q-Chem includes 3 theories: VPT2, TOSH, and VCI.\n # For now, just take the VPT2 results.\n\n # if 'VIBRATIONAL ANHARMONIC ANALYSIS' in line:\n\n # while list(set(line.strip())) != ['=']:\n # if 'VPT2' in line:\n # if not hasattr(self, 'vibanharms'):\n # self.vibanharms = []\n # self.vibanharms.append(float(line.split()[-1]))\n # line = next(inputfile)\n\n if vibfreqs:\n self.set_attribute(\"vibfreqs\", vibfreqs)\n if vibfconsts:\n self.set_attribute(\"vibfconsts\", vibfconsts)\n if vibrmasses:\n self.set_attribute(\"vibrmasses\", vibrmasses)\n if vibirs:\n self.set_attribute(\"vibirs\", vibirs)\n if vibramans:\n self.set_attribute(\"vibramans\", vibramans)\n if vibdisps:\n self.set_attribute(\"vibdisps\", vibdisps)\n\n if 'STANDARD THERMODYNAMIC QUANTITIES AT' in line:\n\n if not hasattr(self, 'temperature'):\n self.temperature = float(line.split()[4])\n # Not supported yet.\n if not hasattr(self, 'pressure'):\n self.pressure = float(line.split()[7])\n self.skip_line(inputfile, 'blank')\n\n line = next(inputfile)\n if self.natom == 1:\n assert 'Translational Enthalpy' in line\n else:\n assert 'Imaginary Frequencies' in line\n line = next(inputfile)\n # Not supported yet.\n assert 'Zero point vibrational energy' in line\n if not hasattr(self, 'zpe'):\n # Convert from kcal/mol to Hartree/particle.\n self.zpve = utils.convertor(float(line.split()[4]),\n 'kcal/mol', 'hartree')\n atommasses = []\n while 'Translational Enthalpy' not in line:\n if 'Has Mass' in line:\n atommass = float(line.split()[6])\n atommasses.append(atommass)\n line = next(inputfile)\n if not hasattr(self, 'atommasses'):\n self.atommasses = numpy.array(atommasses)\n\n while line.strip():\n line = next(inputfile)\n\n line = next(inputfile)\n assert 'Total Enthalpy' in line\n if not hasattr(self, 'enthalpy'):\n enthalpy = float(line.split()[2])\n self.enthalpy = utils.convertor(enthalpy,\n 'kcal/mol', 'hartree')\n line = next(inputfile)\n assert 'Total Entropy' in line\n if not hasattr(self, 'entropy'):\n 
entropy = float(line.split()[2]) / 1000\n # This is the *temperature dependent* entropy.\n self.entropy = utils.convertor(entropy,\n 'kcal/mol', 'hartree')\n if not hasattr(self, 'freeenergy'):\n self.freeenergy = self.enthalpy - self.entropy * self.temperature\n\n # Extract total elapsed (wall) and CPU job times\n if line[:16] == ' Total job time:':\n self.metadata['success'] = True\n # create empty list for the times to be stored in\n if not \"wall_time\" in self.metadata:\n self.metadata['wall_time'] = []\n if not \"cpu_time\" in self.metadata:\n self.metadata['cpu_time'] = []\n # the line format is \" Total job time: 120.37s(wall), 2251.02s(cpu)\" at the end of each job ran. \n # first split the line by white space\n try:\n a = line.split()\n # next split the second to last entry at the 's' to pull wall time\n # cast as a float for use in timedelta data structure\n wall_td = datetime.timedelta(seconds=float(a[-2].split('s')[0]))\n # next split the last entry at the 's' to pull cpu time\n # cast as a float for use in timedelta data structure\n cpu_td = datetime.timedelta(seconds=float(a[-1].split('s')[0]))\n self.metadata['wall_time'].append(wall_td)\n self.metadata['cpu_time'].append(cpu_td)\n except:\n pass",
"def _parse(self, lines):\n global VELSCALE\n self.title = lines[0].strip()\n self.time = None\n\n try:\n words = lines[1].split()\n self.natom = int(words[0])\n except (IndexError, ValueError):\n raise TypeError('Unrecognized file type [%s]' % self.filename)\n\n if len(words) >= 2:\n self.time = float(words[1]) * units.picoseconds\n\n if len(lines) == int(ceil(self.natom / 2.0) + 2):\n hasbox = hasvels = False\n self.boxVectors = self.velocities = None\n elif self.natom in (1, 2) and len(lines) == 4:\n # This is the _only_ case where line counting does not work -- there\n # is either 1 or 2 atoms and there are 4 lines. The 1st 3 lines are\n # the title, natom/time, and coordinates. The 4th are almost always\n # velocities since Amber does not make it easy to make a periodic\n # system with only 2 atoms. If natom is 1, the 4th line is either a\n # velocity (3 #'s) or a box (6 #'s). If natom is 2, it is a bit\n # ambiguous. However, velocities (which are scaled by 20.445) have a\n # ~0% chance of being 60+, so we can pretty easily tell if the last\n # line has box dimensions and angles or velocities. I cannot\n # envision a _plausible_ scenario where the detection here will fail\n # in real life.\n line = lines[3]\n if self.natom == 1:\n tmp = [line[i:i+12] for i in range(0, 72, 12) if line[i:i+12]]\n if len(tmp) == 3:\n hasvels = True\n hasbox = False\n self.boxVectors = False\n elif len(tmp) == 6:\n hasbox = True\n hasvels = False\n self.velocities = None\n else:\n raise TypeError('Unrecognized line in restart file %s' %\n self.filename)\n else:\n # Ambiguous case\n tmp = [float(line[i:i+12]) >= 60.0 for i in range(0, 72, 12)]\n if any(tmp):\n hasbox = True\n hasvels = False\n self.velocities = False\n else:\n hasvels = True\n hasbox = False\n self.boxVectors = False\n elif len(lines) == int(ceil(self.natom / 2.0) + 3):\n hasbox = True\n hasvels = False\n self.velocities = None\n elif len(lines) == int(2 * ceil(self.natom / 2.0) + 2):\n hasbox = False\n self.boxVectors = None\n hasvels = True\n elif len(lines) == int(2 * ceil(self.natom / 2.0) + 3):\n hasbox = hasvels = True\n else:\n raise TypeError('Badly formatted restart file. Has %d lines '\n 'for %d atoms.' % (len(self.lines), self.natom))\n\n if self._asNumpy:\n coordinates = np.zeros((self.natom, 3), np.float32)\n if hasvels:\n velocities = np.zeros((self.natom, 3), np.float32)\n else:\n coordinates = [Vec3(0.0, 0.0, 0.0) for i in range(self.natom)]\n if hasvels:\n velocities = [Vec3(0.0, 0.0, 0.0) for i in range(self.natom)]\n\n # Now it's time to parse. 
Coordinates first\n startline = 2\n endline = startline + int(ceil(self.natom / 2.0))\n idx = 0\n for i in range(startline, endline):\n line = lines[i]\n x = float(line[ 0:12])\n y = float(line[12:24])\n z = float(line[24:36])\n coordinates[idx] = Vec3(x, y, z)\n idx += 1\n if idx < self.natom:\n x = float(line[36:48])\n y = float(line[48:60])\n z = float(line[60:72])\n coordinates[idx] = Vec3(x, y, z)\n idx += 1\n self.coordinates = units.Quantity(coordinates, units.angstroms)\n startline = endline\n # Now it's time to parse velocities if we have them\n if hasvels:\n endline = startline + int(ceil(self.natom / 2.0))\n idx = 0\n for i in range(startline, endline):\n line = lines[i]\n x = float(line[ 0:12]) * VELSCALE\n y = float(line[12:24]) * VELSCALE\n z = float(line[24:36]) * VELSCALE\n velocities[idx] = Vec3(x, y, z)\n idx += 1\n if idx < self.natom:\n x = float(line[36:48]) * VELSCALE\n y = float(line[48:60]) * VELSCALE\n z = float(line[60:72]) * VELSCALE\n velocities[idx] = Vec3(x, y, z)\n idx += 1\n startline = endline\n self.velocities = units.Quantity(velocities,\n units.angstroms/units.picoseconds)\n if hasbox:\n line = lines[startline]\n try:\n tmp = [float(line[i:i+12]) for i in range(0, 72, 12)]\n except (IndexError, ValueError):\n raise ValueError('Could not parse box line in %s' %\n self.filename)\n lengths = tmp[:3] * units.angstroms\n angles = tmp[3:] * units.degrees\n self.boxVectors = computePeriodicBoxVectors(lengths[0], lengths[1],\n lengths[2], angles[0], angles[1], angles[2])",
"def __init__(self, xml_text):\n logger.verbose(\"Load Version.xml\")\n self.parse(xml_text)",
"def parse_version(module_file):\n f = open(module_file)\n s = f.read()\n f.close()\n match = re.findall(\"__version__ = '([^']+)'\", s)\n return match[0]",
"def read_version(self, fname):\n version = 'unknown'\n lines = open(fname).readlines()\n for line in lines:\n if \" Version\" in line:\n version = line.split()[-2]\n break\n return version",
"def test_old_style_with_version(self):\n self.assertIsNotNone(parse_arxiv_id('gr-qc/9901123v3'))",
"def _update_pyrex_file(self, lines, filename):\n found_version_line = False\n for lineno, line in enumerate(lines):\n if line.startswith('__version__'):\n found_version_line = True\n break\n if found_version_line:\n if self.Verbose:\n print 'Version string found on line %d' % lineno\n lines[lineno] = '__version__ = \"%s\"\\n' % str(self.VersionTuple)\n else:\n print \"No version string found in %s\" % filename\n return (lines, found_version_line)",
"def get_versions(versions_file):\n with open(\"versions.txt\", \"r\") as f:\n return dict(line.strip().split(\"=\") for line in f)",
"def _read_etc(etc_file):\r\n with open(etc_file, 'rb') as f:\r\n f.seek(352) # end of header\r\n v1 = unpack('<i', f.read(4))[0]\r\n v2 = unpack('<i', f.read(4))[0]\r\n v3 = unpack('<i', f.read(4))[0] # always zero?\r\n v4_a = unpack('<h', f.read(2))[0] # they look like two values\r\n v4_b = unpack('<h', f.read(2))[0] # maybe this one is unsigned (H)\r\n\r\n f.seek(352) # end of header\r\n # lg.debug(hexlify(f.read(16)))\r\n return v1, v2, v3, (v4_a, v4_b)",
"def parse_update(self, file):\n\n self.new_hashes = []\n self.old_hashes = []\n parsed = self.parse_header(file.readline())\n if parsed:\n (type, version) = parsed\n self.log.debug(\"Received list type: %s, version: %s\" % (type, version))\n pattern = re.compile(HASH_REGEX)\n for line in file:\n m = pattern.search(line)\n if m:\n if m.group(1) == \"+\":\n self.new_hashes.append(m.group(2))\n elif m.group(1) == \"-\":\n self.old_hashes.append(m.group(2))\n\n self._version = int(version)\n else:\n raise SafeBrowsingUpdateError(\"Received bad/empty list, no changes made\")",
"def parse_version(raw_info):\n version_stamp = raw_info.split(\"\\n\")[0].split(\"Version \")[1]\n if version_stamp.startswith(\"AFNI\"):\n version_stamp = version_stamp.split(\"AFNI_\")[1]\n elif version_stamp.startswith(\"Debian\"):\n version_stamp = version_stamp.split(\"Debian-\")[1].split(\"~\")[0]\n else:\n return None\n\n version = LooseVersion(version_stamp.replace(\"_\", \".\")).version[:3]\n if version[0] < 1000:\n version[0] = version[0] + 2000\n return tuple(version)"
] | [
"0.6567073",
"0.5690683",
"0.56216645",
"0.5586731",
"0.5546465",
"0.55331504",
"0.5424544",
"0.5418142",
"0.5321359",
"0.5282709",
"0.52254856",
"0.5218742",
"0.52110106",
"0.5190335",
"0.51662725",
"0.5163971",
"0.5161085",
"0.51590484",
"0.5139128",
"0.5135782",
"0.51331985",
"0.5127512",
"0.51197654",
"0.51146924",
"0.5093569",
"0.509353",
"0.50868404",
"0.5069714",
"0.5066218",
"0.5065917"
] | 0.6053738 | 1 |
Get list of dissemination formats. | def get_dissemination_formats(docmeta: DocMetadata,
format_pref: Optional[str] = None,
add_sciencewise: bool = False) -> List:
return current_session().get_dissemination_formats(docmeta,
format_pref,
add_sciencewise) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def formats(self):\n logger.debug(\"Get formats\")\n return self._raw_api.formats.get()",
"def getFormats(self):\n return self.formats",
"def get_export_formats(self):\n return [f for f in self.formats if f().can_export()]",
"def export_formats(self):\n return list(self._export_formats)",
"def formats():\n return _FORMATS",
"def getFormatList(base_url):\n\n\tquery_url = base_url + \"/formats\"\n\trequest = urllib2.urlopen(query_url)\n\tresponse = request.read()\n\tresponse_xml = ET.fromstring(response)\n\n\tfmt_list = {}\n\n\tformats = response_xml.findall(\".//objectFormat\")\n\n\tfor f in formats:\n\t\tfmt_identifier = f.find(\"formatId\").text\n\t\tfmt_name = f.find(\"formatName\").text\n\t\tfmt_type = f.find(\"formatType\").text\n\t\tfmt_path = makeValidFormatPath(fmt_name)\n\n\t\tfmt_list[fmt_identifier] = { \"formatId\" : fmt_identifier, \"formatName\" : fmt_name, \"formatType\" : fmt_type, \"formatPath\" : fmt_path }\n\n\treturn fmt_list",
"def get_dissemination_formats(self,\n docmeta: DocMetadata,\n format_pref: Optional[str] = None,\n add_sciencewise: bool = False) -> List[str]:\n formats: List[str] = []\n\n # first, get possible list of formats based on available source file\n source_file_path = self._get_source_path(docmeta)\n source_file_formats: List[str] = []\n if source_file_path is not None:\n source_file_formats = \\\n formats_from_source_file_name(source_file_path)\n if source_file_formats:\n formats.extend(source_file_formats)\n\n if add_sciencewise:\n if formats and formats[-1] == 'other':\n formats.insert(-1, 'sciencewise_pdf')\n else:\n formats.append('sciencewise_pdf')\n\n else:\n # check source type from metadata, with consideration of\n # user format preference and cache\n version = docmeta.version\n format_code = docmeta.version_history[version - 1].source_type.code\n cached_ps_file_path = cache.get_cache_file_path(\n docmeta,\n 'ps')\n cache_flag = False\n if cached_ps_file_path \\\n and os.path.getsize(cached_ps_file_path) == 0 \\\n and source_file_path \\\n and os.path.getmtime(source_file_path) \\\n < os.path.getmtime(cached_ps_file_path):\n cache_flag = True\n\n source_type_formats = formats_from_source_type(format_code,\n format_pref,\n cache_flag,\n add_sciencewise)\n if source_type_formats:\n formats.extend(source_type_formats)\n\n return formats",
"def available_formats() -> List[str]:\n formats = [p.stem for p in Path(TEMPLATES_PATH).glob(f'*{TEMPLATE_SUFFIX}')]\n return formats",
"def get_import_formats(self):\n return [f for f in self.formats if f().can_import()]",
"def show_formats(self):\n\n # the link below is Ginger by Wizkid, you can replace it with any other\n # Youtube link\n # demo link 'https://www.youtube.com/watch?v=YSy2lBZ1QrA'\n # self.url = sys.argv[1]\n\n self.ydl = youtube_dl.YoutubeDL()\n # uses the youtube_dl as a context manager\n with self.ydl:\n self.result = self.ydl.extract_info(\n self.url, extra_info={'listformats': True}, download=False)\n for format in self.result['formats']:\n format_id = format['format_id']\n filesize = size(format['filesize']\n ) if format['filesize'] else 0\n if format['ext'] == 'mp4':\n ext = format['ext']\n else:\n continue\n format_note = format['format_note']\n full_info = ' '.join([str('id=' + format_id), str(format_note),\n str(ext), str(filesize)])\n print(full_info)\n print()\n print(f\"Pick a format to download \\n {self.result['title']}\")\n\n self.request_id()",
"def build_list(self):\n kernel = suit.core.kernel.Kernel.getSingleton()\n session = kernel.session()\n \n # get available formats\n fmt_view = kernel.getRegisteredViewerFormats()\n fmt_edit = kernel.getRegisteredEditorFormats()\n \n # process formats to create map\n for fmt in fmt_view:\n title = session.get_idtf(fmt)\n if self._formats.has_key(title):\n continue \n self._formats[title] = (fmt, False)\n \n # check for edit\n for fmt in fmt_edit:\n title = session.get_idtf(fmt)\n self._formats[title] = (self._formats[title][0], True)\n \n # fill list with available information about formats\n self.types_list.removeAllItems()\n for title in self._formats.iterkeys():\n self.types_list.addItem(title)\n \n self.types_list.clearIndexSelected()",
"def formats():\n if PIL_ENABLED:\n return 'BMP', 'EPS', 'GIF', 'JPEG', 'MSP', 'PCX', 'PNG', 'SVG', 'TIFF', 'XBM'\n else:\n return 'EPS', 'SVG'",
"def getFormatsFromDescr(descr):\n i = getIter(descr)\n if not i:\n return\n\n try:\n item = i.next()\n while item:\n item1 = item[1]\n if isinstance(item1, str):\n yield normalize_format(item1)\n else:\n l = []\n for j in getFormatsFromDescr(item1):\n l.append(j)\n yield l\n item = i.next()\n except StopIteration:\n pass",
"def get_formats(self):\n return tuple(self._names.keys())",
"def all_formats(cls):\n if cls._format_to_serializer is None:\n cls._register_subclasses()\n formats = ['auto']\n formats.extend(cls._format_to_serializer)\n return formats",
"def available_output_formats():\n output_formats = []\n for v in pkg_resources.iter_entry_points(_DRIVERS_ENTRY_POINT):\n try:\n output_formats.append(\n v.load().OutputData.METADATA[\"driver_name\"])\n except AttributeError:\n pass\n return output_formats",
"def getSupportedFileFormats():\n return {\"Bitmap\":[\"*.bmp\", \"*.dib\"], \"JPEG\": [\"*.jpeg\", \"*.jpg\", \"*.jpe\"], \"JPEG 2000\": [\"*.jp2\"],\"Portable Network Graphics\" : [\"*.png\"], \"WebP\": [\"*.webp\"], \"Portable Image Formats\":[\"*.pbm\", \"*.pgm\", \"*.ppm\"], \"Sun Rasters\":[\"*.sr\", \"*.ras\"], \"TIFF Files\": [\"*.tiff\",\"*.tif\"] }",
"def available_input_formats():\n input_formats = []\n # Extensions.\n for v in pkg_resources.iter_entry_points(_DRIVERS_ENTRY_POINT):\n try:\n input_formats.append(v.load().InputData.METADATA[\"driver_name\"])\n except ImportError:\n raise\n except Exception:\n pass\n return input_formats",
"def get_supported_formats(pandas = False):\n global _pma_debug\n url = \"https://host.pathomation.com/etc/supported_formats.php\"\n \n if _pma_debug == True:\n print(url)\n \n headers = {'Accept': 'application/json'}\n r = requests.get(url, headers=headers)\n json = r.json()\n \n if (pandas == True):\n import pandas as pd\n return pd.DataFrame.from_records(json, index=[\"vendor\"])\n else:\n return json",
"def _get_report_format_ids(self):\n self.logger.info('[INFO] Retrieving all available OpenVAS report formats...')\n params = {\n 'cmd': 'get_report_formats',\n 'token': self.token,\n }\n\n url = self.basename + \"/gmp\"\n\n r = self.helper._get_request(\n url, \n self.basic_auth, \n params, \n self.headers, \n self.cookies)\n\n if r.status_code == 200:\n xml_response = BeautifulSoup(r.content, 'lxml')\n formats_xml = xml_response.find_all('report_format')\n for report in formats_xml:\n if report.findChild('name', recursive=False).text == 'XML':\n self.xml_report_id = report.get('id')\n if report.findChild('name', recursive=False).text == 'CSV Results':\n self.csv_report_id = report.get('id')\n else:\n raise Exception('[FAIL] Could not get report formats from OpenVAS')\n print(self.csv_report_id)",
"def GetCaptureFileFormats(self): # real signature unknown; restored from __doc__\n pass",
"def output_formats(self) -> List[DataFormat]:\n return [DataFormat.NGEN_OUTPUT]",
"def LoadFileFormats(self):\n\n self.__api = FileFormatApi\n self.__apiexc = ApiException\n\n try:\n api_instance = self.__api(self.__engine.api_client)\n\n fileformats = paginator(\n api_instance,\n \"get_all_file_formats\")\n\n if fileformats.response_list:\n for c in fileformats.response_list:\n fileformat = DxFileFormat(self.__engine, existing_object=c)\n self.__filetypeList[c.file_format_id] = fileformat\n else:\n #print_error(\"No file formats found\")\n self.__logger.error(\"No file formats found\")\n\n except self.__apiexc as e:\n print_error(\"Can't load file formats %s\" % e.body)\n return None",
"def sox_get_supported_formats(self):\n formats = ['wav']\n result = self._process_command('sox -h', PIPE, supress_dry_run=True)\n matches = re.findall(RE_SOX_AUDIO_SUPPORT, result[1][0])\n\n if matches is not None:\n formats = matches[0].strip().split(' ')\n\n logging.debug('Sox supported audio formats: %s', formats)\n return formats",
"def listFeaturableContentTypes():",
"def list_extensions():\n formats = FileFormat.list_formats()\n return render_template('home.html', formats=formats)",
"def get_renderers(self, request):\n if self._format_override_parameter in request.REQUEST:\n formats = request.REQUEST[self._format_override_parameter].split(',')\n renderers, seen_formats = [], set()\n for format in formats:\n if format in self._renderers_by_format and format not in seen_formats:\n renderers.extend(self._renderers_by_format[format])\n elif request.META.get('HTTP_ACCEPT'):\n accepts = self.parse_accept_header(request.META['HTTP_ACCEPT'])\n renderers = MediaType.resolve(accepts, self._renderers)\n elif self._default_format:\n renderers = self._renderers_by_format[self._default_format]\n else:\n renderers = []\n if self._force_fallback_format:\n renderers.extend(self._renderers_by_format[self._force_fallback_format])\n return renderers",
"def testGetAllowedConversionFormatList(self):\n get = Handler.getAllowedConversionFormatList\n # Handled mimetypes\n self.assertEquals(get(\"text/html;ignored=param\"),\n [(\"application/pdf\", \"PDF - Portable Document Format\")])\n\n # Unhandled mimetypes\n self.assertEquals(get(\"application/pdf;ignored=param\"), [])",
"def get_column_formats(self):\n image_columns = self.image_column_nums()\n formats = [self.column_formats[i] if i not in image_columns else '%h' for i in range(len(self.column_formats))]\n return formats[1:-2]",
"def getFormatterDefs(self): #$NON-NLS-1$\r\n if self.formatterDefs is None:\r\n resouceReg = self.extensionPoint.getPlugin().getResourceRegistry()\r\n self.formatterDefs = []\r\n formatElems = self._getExtensionDefChildNodes(u\"plg:link-formatters/plg:link-formatter\") #$NON-NLS-1$\r\n for formatterElem in formatElems:\r\n formatterDef = ZLinkFormatterDef(formatterElem, resouceReg)\r\n self.formatterDefs.append(formatterDef)\r\n return self.formatterDefs"
] | [
"0.7676491",
"0.72095263",
"0.6843036",
"0.676619",
"0.6653835",
"0.664365",
"0.65041065",
"0.6443467",
"0.64138347",
"0.60489416",
"0.6047862",
"0.6045683",
"0.60069233",
"0.60045713",
"0.59716725",
"0.5901643",
"0.58269656",
"0.580406",
"0.57598495",
"0.56726515",
"0.5669241",
"0.5666896",
"0.55859554",
"0.55561143",
"0.5482688",
"0.5434376",
"0.5418582",
"0.5403171",
"0.5382667",
"0.5378251"
] | 0.745034 | 1 |
get the closest mesh triangle ids from a transform position | def closestTriangleToTransform(transform, meshName):
faceVertices, points = meshData.getMeshData(meshName)
vertexFaces = meshData.getMeshVertexFaces(faceVertices)
point = np.array(cmds.xform(transform, q=1, ws=1, t=1), dtype=np.double)
return meshData.getClosestTriangle(point, points, vertexFaces, faceVertices) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def MeshVtxAdjacentVtxs (strMesh, index, blnAbsolutConnections=False, blnCreate=False):\n \"\"\"custom function\"\"\"\n #-----------------------------------------------------------------------------------------------------------------------------------------\n def CullDuplicates(seq, idfun=None): \n # order preserving \n if idfun is None: \n def idfun(x): return x \n seen = {} \n result = [] \n for item in seq: \n marker = idfun(item) \n if marker in seen: continue \n seen[marker] = 1 \n result.append(item) \n return result\n #-----------------------------------------------------------------------------------------------------------------------------------------\n MeshVtxAdjacentVtxs = []\n if rs.IsMesh(strMesh)==False : \n print \"strMesh is not an mesh\"\n return None\n if type(index)==type(\"string\"):\n print \"index is not an integer\"\n return None\n if type(index)==type(0.1): index = int(index)\n\n arrVertices = rs.MeshVertices (strMesh)\n arrFaceVertices = rs.MeshFaceVertices(strMesh)\n\n intCount = 0\n arrAdjacentVtxs = []\n for arrFace in arrFaceVertices:\n blnIsAdjacent = False\n for arrVtxIndex in arrFace:\n if arrVtxIndex == index :\n blnIsAdjacent = True\n if blnIsAdjacent :\n if blnAbsolutConnections :\n if arrFace[2]==arrFace[3] :\n for arrVtxIndex in arrFace :\n if arrVtxIndex != index :\n arrAdjacentVtxs.append( arrVtxIndex)\n else :\n if index == arrFace[0] :\n arrAdjacentVtxs.append( arrFace[3] )\n arrAdjacentVtxs.append( arrFace[1] )\n elif index == arrFace[1] :\n arrAdjacentVtxs.append( arrFace[0] )\n arrAdjacentVtxs.append( arrFace[2] )\n elif index == arrFace[2] :\n arrAdjacentVtxs.append( arrFace[1] )\n arrAdjacentVtxs.append( arrFace[3] )\n elif index == arrFace(3) :\n arrAdjacentVtxs.append( arrFace[2] )\n arrAdjacentVtxs.append( arrFace[0] )\n else :\n for arrVtxIndex in arrFace :\n if arrVtxIndex != index :\n arrAdjacentVtxs.append( arrVtxIndex )\n if type(arrAdjacentVtxs) != type([]) : return None\n arrOrderAdjacentVtxs = CullDuplicates(arrAdjacentVtxs)\n if blnCreate :\n arrStrPts = []\n for arrVtxIndex in arrOrderAdjacentVtxs:\n rs.AddPoint ( arrVertices[arrVtxIndex] )\n arrStrPts.append( arrVertices[arrVtxIndex] )\n return arrStrPts\n else :\n return arrOrderAdjacentVtxs",
"def getProximity(tuples):\n\t\t\tsortedIndices = [indices for indices in tuples]\n\t\t\t#return abs(sortedIndices[0][1] - sortedIndices[-1][0])\n\t\t\treturn sortedIndices[-1][0] - sortedIndices[0][1]",
"def getVertexPositions(transform_name):\n positions = pm.xform('{}.vtx[*]'.format(transform_name), q=True, ws=True, t=True)\n return zip(positions[0::3], positions[1::3], positions[2::3])",
"def getClosestVtxFromMeshComponent(mesh, objArray=None):\n mesh = pm.PyNode(mesh)\n meshShp = mesh.getShape()\n mySets = []\n\n if objArray is None:\n objArray = pm.selected()\n\n cPnt=pm.createNode('closestPointOnMesh')\n pm.connectAttr (meshShp+'.outMesh', cPnt+'.inMesh')\n\n pm.select(cl=1)\n for obj in objArray:\n objShp = obj.getShape()\n setName = '%s_vtxSet'%obj\n\n if pm.objExists(setName):\n pm.delete(setName)\n\n pm.sets(n = setName)\n\n vtxs=[]\n if objShp.type()=='mesh':\n c = 1\n for i in range(0, objShp.numVertices()):\n cPnt.inPosition.set( objShp.getPoint(i, space='world') )\n\n myVtx = pm.PyNode( '%s.vtx[%s]'%(mesh, cPnt.closestVertexIndex.get()) )\n\n if myVtx not in vtxs:\n vtxs.append(myVtx)\n\n pm.select(myVtx)\n pm.sets(setName, add=pm.selected())\n\n pm.delete(cPnt)\n\n pm.select(cl=1)\n pm.select(mySets)",
"def Triangulate(self, p_int, vtkIdList, vtkPoints):\n ...",
"def get_triangle(remote, objectid, triangleid):\n cmd1 = mmapi.StoredCommands()\n key1 = cmd1.AppendSceneCommand_GetTriangleIndices(objectid, triangleid)\n remote.runCommand(cmd1)\n t = mmapi.vec3i()\n cmd1.GetSceneCommandResult_GetTriangleIndices(key1, t)\n return (t.i, t.j, t.k)",
"def __get_adjacents_from_id(self, position):\n if position == 1: #Upper-left corner.\n return [position + 5, position + 1]\n elif position == 5: #Upper-right corner.\n return [position + 5, position - 1]\n elif position == 21: #Lower-left corner.\n return [position - 5, position + 1]\n elif position == 25: #Lower-right corner.\n return [position - 5, position - 1]\n elif position == 2 or position == 3 or position == 4: #Upper wall.\n return [position + 5, position - 1, position + 1]\n elif position == 10 or position == 15 or position == 20: #Right wall.\n return [position + 5, position - 5, position - 1]\n elif position == 6 or position == 11 or position == 16: #Left wall.\n return [position + 5, position - 5, position + 1]\n elif position == 22 or position == 23 or position == 24: #Bottom wall.\n return [position - 5, position - 1, position + 1]\n else: #All other positions.\n return [position - 5, position + 5, position - 1, position + 1]",
"def mesh_span_indices(self):\n self._ensure_mesh()\n k2m = self._knots_to_mesh\n return np.where(k2m[1:] != k2m[:-1])[0]",
"def point_location(tri, p): \n simplex_index = tri.find_simplex(p)\n bc = []\n for id_, point in zip(simplex_index, p):\n # Calculate the two first barycentric coordinates for the relevant\n # simplex\n b = tri.transform[id_, :2].dot(point-tri.transform[id_, 2])\n bc.append(np.c_[np.atleast_2d(b), 1-b.sum()])\n # Create the full array and squeeze the shit out of it\n bc = np.array(bc).squeeze()\n return simplex_index, bc",
"def findSubsetIndices(grdMODEL, min_lat, max_lat, min_lon, max_lon):\n\n\n if min_lon<0 and max_lon>0:\n splitExtract = True; Turns=2\n grdMODEL.splitExtract=splitExtract\n else:\n splitExtract = False; Turns=1\n grdMODEL.splitExtract=splitExtract\n grdMODEL.lon = np.where(grdMODEL.lon>180,grdMODEL.lon-360,grdMODEL.lon)\n \n # Array to store the results returned from the function\n res=np.zeros((Turns,4),dtype=np.float64)\n \n lats=grdMODEL.lat[:,0]\n lons=grdMODEL.lon[0,:]\n\n \n for k in range(Turns):\n\n if k==0 and splitExtract == True:\n minLon=min_lon; maxLon=0\n minLon=minLon+360\n maxLon=maxLon+360\n elif k==1 and splitExtract == True:\n minLon=0; maxLon=max_lon\n else:\n minLon=min_lon; maxLon=max_lon\n \n distances1 = []\n distances2 = []\n indices=[]\n index=1\n for point in lats:\n s1 = max_lat-point # (vector subtract)\n s2 = min_lat-point # (vector subtract)\n distances1.append((np.dot(s1, s1), point, index))\n distances2.append((np.dot(s2, s2), point, index-1))\n index=index+1\n\n distances1.sort()\n distances2.sort()\n indices.append(distances1[0])\n indices.append(distances2[0])\n \n distances1 = []\n distances2 = []\n index=1\n \n for point in lons:\n s1 = maxLon-point # (vector subtract)\n s2 = minLon-point # (vector subtract)\n distances1.append((np.dot(s1, s1), point, index))\n distances2.append((np.dot(s2, s2), point, index-1))\n index=index+1\n \n distances1.sort()\n distances2.sort()\n indices.append(distances1[0])\n indices.append(distances2[0])\n\n # Save final product: max_lat_indices,min_lat_indices,max_lon_indices,min_lon_indices\n minJ=indices[1][2]\n maxJ=indices[0][2]\n minI=indices[3][2]\n maxI=indices[2][2]\n \n res[k,0]=minI; res[k,1]=maxI; res[k,2]=minJ; res[k,3]=maxJ;\n\n # Save final product: max_lat_indices,min_lat_indices,max_lon_indices,min_lon_indices\n grdMODEL.indices=res",
"def FindClosestNPoints(self, p_int, , vtkIdList):\n ...",
"def get_triangles(self):\n location = TopLoc_Location()\n bt = BRep_Tool()\n facing = bt.Triangulation(self.topods_shape(), location)\n if facing == None:\n return [], []\n\n tab = facing.Nodes()\n tri = facing.Triangles()\n verts = []\n for i in range(1, facing.NbNodes() + 1):\n p = tab.Value(i).Transformed(location.Transformation())\n verts.append(np.array(list(p.Coord())))\n\n tris = []\n reversed = self.reversed()\n for i in range(1, facing.NbTriangles() + 1):\n # OCC triangle normals point in the surface normal\n # direction\n if reversed:\n index1, index3, index2 = tri.Value(i).Get()\n else:\n index1, index2, index3 = tri.Value(i).Get()\n\n tris.append([index1 - 1, index2 - 1, index3 - 1])\n\n return np.asarray(verts, dtype=np.float32), np.asarray(tris, dtype=np.int32)",
"def tri_interpolate_zcoords(points: np.ndarray, triangles: np.ndarray, mesh_points: np.ndarray,\n is_mesh_edge: np.ndarray, num_search_tris: int=10):\n # Get triangle centroid coordinates and create KD-tree.\n tri_coords = points[triangles,:]\n tri_coords2D = points[triangles,0:2]\n tri_centroids = np.mean(tri_coords2D, axis=1)\n tri_tree = scipy.spatial.cKDTree(tri_centroids)\n\n # Loop over points.\n coords2d = mesh_points[:,0:2]\n num_mesh_points = coords2d.shape[0]\n z = np.zeros(num_mesh_points, dtype=np.float64)\n for point_num in range(num_mesh_points):\n if not(is_mesh_edge[point_num]):\n z[point_num] = project_2d_coords(tri_coords, coords2d[point_num,:], tri_tree, num_search_tris=num_search_tris)\n\n return z",
"def get_element_for_location(self, points):\n verts = np.zeros((points.shape[0], 4, 3))\n bc = np.zeros((points.shape[0], 4))\n tetras = np.zeros(points.shape[0], dtype=\"int64\")\n inside = np.zeros(points.shape[0], dtype=bool)\n npts = 0\n npts_step = int(1e4)\n # break into blocks of 10k points\n while npts < points.shape[0]:\n\n cell_index = np.array(\n self.aabb_grid.position_to_cell_index(points[: npts + npts_step, :])\n )\n inside = self.aabb_grid.inside(points[: npts + npts_step, :])\n global_index = (\n cell_index[:, 0]\n + self.aabb_grid.nsteps_cells[None, 0] * cell_index[:, 1]\n + self.aabb_grid.nsteps_cells[None, 0]\n * self.aabb_grid.nsteps_cells[None, 1]\n * cell_index[:, 2]\n )\n\n tetra_indices = self.aabb_table[global_index[inside], :].tocoo()\n # tetra_indices[:] = -1\n row = tetra_indices.row\n col = tetra_indices.col\n # using returned indexes calculate barycentric coords to determine which tetra the points are in\n vertices = self.nodes[self.elements[col, :4]]\n pos = points[row, :]\n vap = pos[:, :] - vertices[:, 0, :]\n vbp = pos[:, :] - vertices[:, 1, :]\n # # vcp = p - points[:, 2, :]\n # # vdp = p - points[:, 3, :]\n vab = vertices[:, 1, :] - vertices[:, 0, :]\n vac = vertices[:, 2, :] - vertices[:, 0, :]\n vad = vertices[:, 3, :] - vertices[:, 0, :]\n vbc = vertices[:, 2, :] - vertices[:, 1, :]\n vbd = vertices[:, 3, :] - vertices[:, 1, :]\n\n va = np.einsum(\"ij, ij->i\", vbp, np.cross(vbd, vbc, axisa=1, axisb=1)) / 6.0\n vb = np.einsum(\"ij, ij->i\", vap, np.cross(vac, vad, axisa=1, axisb=1)) / 6.0\n vc = np.einsum(\"ij, ij->i\", vap, np.cross(vad, vab, axisa=1, axisb=1)) / 6.0\n vd = np.einsum(\"ij, ij->i\", vap, np.cross(vab, vac, axisa=1, axisb=1)) / 6.0\n v = np.einsum(\"ij, ij->i\", vab, np.cross(vac, vad, axisa=1, axisb=1)) / 6.0\n c = np.zeros((va.shape[0], 4))\n c[:, 0] = va / v\n c[:, 1] = vb / v\n c[:, 2] = vc / v\n c[:, 3] = vd / v\n # inside = np.ones(c.shape[0],dtype=bool)\n mask = np.all(c >= 0, axis=1)\n\n verts[: npts + npts_step, :, :][row[mask], :, :] = vertices[mask, :, :]\n bc[: npts + npts_step, :][row[mask], :] = c[mask, :]\n tetras[: npts + npts_step][row[mask]] = col[mask]\n inside[: npts + npts_step][row[mask]] = True\n npts += npts_step\n return verts, bc, tetras, inside",
"def extract_triangles(mesh, materials_list):\n tri_list = []\n do_uv = bool(mesh.tessface_uv_textures)\n\n for mat in materials_list:\n for i, face in enumerate(mesh.tessfaces):\n f_v = face.vertices\n if mesh.materials[face.material_index].name != mat: continue\n\n uf = mesh.tessface_uv_textures.active.data[i] if do_uv else None\n\n fmt = 0\n if(do_uv): fmt = face.material_index\n\n if do_uv:\n f_uv = uf.uv\n\n if len(f_v) == 3:\n new_tri = tri_wrapper((f_v[0], f_v[1], f_v[2]), fmt)\n if (do_uv):\n new_tri.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2])\n else: new_tri.faceuvs = uv_key((0.0,0.0)), uv_key((1.0,0.0)), uv_key((0.0,1.0))\n tri_list.append(new_tri)\n\n else: # it's a quad\n new_tri = tri_wrapper((f_v[0], f_v[1], f_v[2]), fmt)\n new_tri_2 = tri_wrapper((f_v[0], f_v[2], f_v[3]), fmt)\n\n if (do_uv):\n new_tri.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2])\n new_tri_2.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[2]), uv_key(f_uv[3])\n else:\n new_tri.faceuvs = uv_key((0.0,0.0)), uv_key((1.0,0.0)), uv_key((0.0,1.0))\n new_tri_2.faceuvs = uv_key((0.0,0.0)), uv_key((1.0,0.0)), uv_key((0.0,1.0))\n\n tri_list.append(new_tri)\n tri_list.append(new_tri_2)\n\n return tri_list",
"def getTargetPositions(rg):\n targetPositions = OrderedDict()\n for r in rg.robotDict.values():\n x, y, z = r.metFiberPos\n targetPositions[r.id] = [x, y]\n return targetPositions",
"def triangulate(points):\n # Remove duplicate xy points bc that would make delauney fail, and must remember z (if any) for retrieving originals from index results\n seen = set() \n uniqpoints = [ p for p in points if str( p[:2] ) not in seen and not seen.add( str( p[:2] ) )]\n classpoints = [_Point(*point[:2]) for point in uniqpoints]\n\n # Compute Delauney\n triangle_ids = tesselator.computeDelaunayTriangulation(classpoints)\n\n # Get vertices from result indexes\n triangles = [[uniqpoints[i] for i in triangle] for triangle in triangle_ids]\n \n return triangles",
"def get_inside_point_ids(gui, ugrid, ugrid_flipped, model_name,\n representation='points'):\n nids = None\n points = ugrid.GetPointData()\n if points is None:\n return ugrid, nids\n\n ids = points.GetArray('Ids')\n if ids is None:\n return ugrid, nids\n\n # all points associated with the correctly selected cells are returned\n # but we get extra points for the cells that are inside and out\n point_ids = vtk_to_numpy(ids)\n nids = gui.get_node_ids(model_name, point_ids)\n\n # these are the points outside the box/frustum (and also include the bad point)\n points_flipped = ugrid_flipped.GetPointData()\n ids_flipped = points_flipped.GetArray('Ids')\n point_ids_flipped = vtk_to_numpy(ids_flipped)\n nids_flipped = gui.get_node_ids(model_name, point_ids_flipped)\n #nids = gui.gui.get_reverse_node_ids(model_name, point_ids_flipped)\n\n # setA - setB\n nids2 = np.setdiff1d(nids, nids_flipped, assume_unique=True)\n\n #narrays = points.GetNumberOfArrays()\n #for iarray in range(narrays):\n #name = points.GetArrayName(iarray)\n #print('iarray=%s name=%r' % (iarray, name))\n\n #------------------\n if representation == 'points':\n # we need to filter the nodes that were filtered by the\n # numpy setdiff1d, so we don't show extra points\n ugrid = create_filtered_point_ugrid(ugrid, nids, nids2)\n\n nids = nids2\n return ugrid, nids",
"def find_extra_surface_nodeids(orderids, sorted_large_nodes):\n extra_nodeids = []\n first_overlap_orderid = orderids[0]\n min_cfrom = sorted_nodes[first_overlap_orderid].cfrom\n\n # Check if any earlier nodes also in the region.\n while True:\n prev_cfrom = sorted_nodes[first_overlap_orderid-1].cfrom\n if prev_cfrom == min_cfrom:\n first_overlap_orderid -= 1\n extra_nodeids.append(sorted_nodes[first_overlap_orderid].nodeid)\n else:\n break\n\n # Find the last node in the region.\n last_overlap_orderid = matched_orderids[-1]\n max_cto = sorted_nodes[last_overlap_orderid].cto\n for i in range(first_overlap_orderid, len(sorted_nodes)):\n if sorted_nodes[i].cfrom > max_cto:\n break\n else:\n if i not in orderids:\n extra_nodeids.append(sorted_nodes[i].nodeid)\n cto = sorted_nodes[i].cto\n if cto >= max_cto:\n max_cto = cto\n if i > last_overlap_orderid:\n last_overlap_orderid = i\n\n return extra_nodeids",
"def _calcOrderedCellVertexIDs(self):\n ids = numerix.zeros((8, self.nx, self.ny, self.nz), 'l')\n indices = numerix.indices((self.nx, self.ny, self.nz))\n ids[1] = indices[0] + (indices[1] + (indices[2] + 1) * (self.ny + 1) + 1) * (self.nx + 1)\n ids[0] = ids[1] + 1\n ids[3] = indices[0] + (indices[1] + (indices[2] + 1) * (self.ny + 1)) * (self.nx + 1)\n ids[2] = ids[3] + 1\n ids[5] = indices[0] + (indices[1] + indices[2] * (self.ny + 1) + 1) * (self.nx + 1)\n ids[4] = ids[5] + 1\n ids[7] = indices[0] + (indices[1] + indices[2] * (self.ny + 1)) * (self.nx + 1)\n ids[6] = ids[7] + 1\n\n return numerix.reshape(ids.swapaxes(1, 3), (8, self.numberOfCells))",
"def _get_planar_tri_edges(npts, tris):\n\n # Find the nodes associated with the triangle\n node_to_tris = []\n for i in range(npts):\n node_to_tris.append([])\n\n for index, tri in enumerate(tris):\n node_to_tris[tri[0]].append(index)\n node_to_tris[tri[1]].append(index)\n node_to_tris[tri[2]].append(index)\n\n # Assign edge numbers for each edge\n edges = []\n edge_to_tris = []\n num_edges = 0\n\n tri_to_edges = []\n for i in range(len(tris)):\n tri_to_edges.append([-1, -1, -1])\n\n for tri_index, tri in enumerate(tris):\n for e1_index, e1 in enumerate(_get_tri_edges(tri)):\n if tri_to_edges[tri_index][e1_index] < 0:\n match = False\n for adj_index in node_to_tris[e1[0]]:\n if adj_index != tri_index:\n for e2_index, e2 in enumerate(_get_tri_edges(tris[adj_index])):\n if ((e1[0] == e2[0] and e1[1] == e2[1]) or\n (e1[1] == e2[0] and e1[0] == e2[1])):\n match = True\n tri_to_edges[tri_index][e1_index] = num_edges\n tri_to_edges[adj_index][e2_index] = num_edges\n edges.append((e1[0], e1[1]))\n edge_to_tris.append((tri_index, adj_index))\n num_edges += 1\n break\n if match:\n break\n\n if not match:\n edges.append((e1[0], e1[1]))\n edge_to_tris.append((tri_index, -1))\n tri_to_edges[tri_index][e1_index] = num_edges\n num_edges += 1\n\n return edges, tri_to_edges, edge_to_tris",
"def strang_mesh(filename):\n\n from math import pi\n from anuga.utilities.numerical_tools import anglediff\n\n\n fid = open(filename)\n points = [] # List of x, y coordinates\n triangles = [] # List of vertex ids as listed in the file\n\n for line in fid.readlines():\n fields = line.split()\n if len(fields) == 2:\n # we are reading vertex coordinates\n points.append([float(fields[0]), float(fields[1])])\n elif len(fields) == 3:\n # we are reading triangle point id's (format ae+b)\n triangles.append([int(float(fields[0]))-1,\n int(float(fields[1]))-1,\n int(float(fields[2]))-1])\n else:\n raise Excetion('wrong format in %s' % filename)\n\n elements = [] #Final list of elements\n\n for t in triangles:\n #Get vertex coordinates\n v0 = t[0]\n v1 = t[1]\n v2 = t[2]\n\n x0 = points[v0][0]\n y0 = points[v0][1]\n x1 = points[v1][0]\n y1 = points[v1][1]\n x2 = points[v2][0]\n y2 = points[v2][1]\n\n #Check that points are arranged in counter clock-wise order\n vec0 = [x1-x0, y1-y0]\n vec1 = [x2-x1, y2-y1]\n vec2 = [x0-x2, y0-y2]\n\n a0 = anglediff(vec1, vec0)\n a1 = anglediff(vec2, vec1)\n a2 = anglediff(vec0, vec2)\n\n if a0 < pi and a1 < pi and a2 < pi:\n elements.append([v0, v1, v2])\n else:\n elements.append([v0, v2, v1])\n\n return points, elements",
"def dichoLocateMesh(rank,e1,xy1,e2,xy2,IKLE,MESHX,MESHY,tree):\n # ~~ Position the middle point ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n xyo = [ ( xy1[0]+xy2[0] )/2.0,( xy1[1]+xy2[1] )/2.0 ]\n eo,bo,tree = nearLocateMesh(xyo,IKLE,MESHX,MESHY,tree)\n if bo != []: return True,eo,xyo,bo\n\n # ~~ Limit the number of useless dichotomies ~~~~~~~~~~~~~~~~~~~~\n rank = rank + 1\n if rank > 3: return False,eo,xyo,bo\n\n # ~~ Sub-segments ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n found,ej,xyj,bj = dichoLocateMesh(rank,e1,xy1,eo,xyo,IKLE,MESHX,MESHY,tree)\n if found: return found,ej,xyj,bj\n found,ej,xyj,bj = dichoLocateMesh(rank,eo,xyo,e2,xy2,IKLE,MESHX,MESHY,tree)\n if found: return found,ej,xyj,bj\n\n return False,eo,xyo,bo",
"def order_tris(tri):\n nv = tri.shape[0]\n for i in range(nv):\n Min = np.argmin(tri[i])\n tri[i] = tri[i,Min],tri[i,np.mod(Min+1,3)],tri[i,np.mod(Min+2,3)]\n return tri",
"def get_mesh_ids(self, body):\n with self.lock:\n return self.send_command('get_kinbody_link_mesh_ids ' + body.GetName())",
"def exportTriangles(self):\n # Filter out triangles with any vertex in the extended BBox\n return [(a-4,b-4,c-4)\n for (a,b,c) in self.triangles if a > 3 and b > 3 and c > 3]",
"def exportTriangles(self):\n # Filter out triangles with any vertex in the extended BBox\n return [(a-4, b-4, c-4)\n for (a, b, c) in self.triangles if a > 3 and b > 3 and c > 3]",
"def extract_ltri( m, context = FloatContext ):\n zero = context.zero\n n,n_ = shape_mat(m)\n return [[ m[i][j] if i >= j else zero \n for j in xrange(n_)] \n for i in xrange(n)]",
"def closest_points(self, entity: _entity_types) -> Tuple[Point3D, Point3D]:\n self_body = _union_entities(self.bodies)\n other_body = _union_entities(entity)\n\n occ1 = _create_component(root(), self_body, name=\"temp\")\n occ2 = _create_component(root(), other_body, name=\"temp\")\n\n try:\n result = app().measureManager.measureMinimumDistance(occ1.bRepBodies[0], occ2.bRepBodies[0])\n return result.positionOne, result.positionTwo\n finally:\n occ1.deleteMe()\n occ2.deleteMe()",
"def extract_ltri( m, context = FloatContext ):\n rows, cols = shape_mat(m)\n return [ row[:i+1] + [context.zero]*(cols - i - 1) \n for i, row in enumerate(m) ]"
] | [
"0.5805424",
"0.57145",
"0.56526566",
"0.565149",
"0.56275636",
"0.55619234",
"0.55525434",
"0.545092",
"0.54096115",
"0.53921366",
"0.538401",
"0.536471",
"0.5312883",
"0.5299891",
"0.5299523",
"0.52991015",
"0.52976525",
"0.5295863",
"0.5291774",
"0.5280542",
"0.52317286",
"0.522029",
"0.5215269",
"0.52111465",
"0.5209932",
"0.520846",
"0.5193939",
"0.5188033",
"0.51878315",
"0.5185307"
] | 0.59132826 | 0 |
get the skinweights from the skincluster, create a skintransforms node and connect it to drive the transforms | def createSkinTansformNode(skinCluster, transforms):
node = cmds.createNode('skinTransforms')
influences = cmds.listConnections('{}.matrix'.format(skinCluster), s=1, d=0)
for i, jnt in enumerate(influences):
cmds.connectAttr('{}.worldMatrix[0]'.format(jnt), '{}.matrix[{}]'.format(node,i))
m = cmds.getAttr('{}.wim[0]'.format(jnt))
cmds.setAttr ('{}.bindPreMatrix[{}]'.format(node, i), m, type="matrix")
mesh = cmds.deformer(skinCluster, q=1, g=1)[0]
for i, each in enumerate(transforms):
triangle = closestTriangleToTransform(each, mesh)
weights=list()
positions=list()
for vtx in triangle:
vtxName = '{}.vtx[{}]'.format(mesh, vtx)
weights.extend(cmds.skinPercent(skinCluster, vtxName, query=True, value=True ))
positions.extend(cmds.xform(vtxName, q=1, ws=1, t=1))
cmds.setAttr('{}.weightsList[{}].weights'.format(node, i), weights, type="doubleArray")
cmds.setAttr('{}.weightsList[{}].points'.format(node, i), positions, type="doubleArray")
m = cmds.getAttr('{}.wm[0]'.format(each));
cmds.setAttr('{}.weightsList[{}].pbm'.format(node, i), m, type="matrix")
for i, loc in enumerate(transforms):
dec = cmds.createNode('decomposeMatrix')
cmds.connectAttr('{}.outputs[{}]'.format(node, i), '{}.inputMatrix'.format(dec))
cmds.connectAttr('{}.outputTranslate'.format(dec), '{}.translate'.format(loc))
cmds.connectAttr('{}.outputRotate'.format(dec), '{}.rotate'.format(loc))
cmds.connectAttr('{}.outputScale'.format(dec), '{}.scale'.format(loc))
return node | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def copy_cluster_weights(shape, weight_file, method=\"bilinear\"):\n\n # gets the temporary folder path\n temp_path = get_temp_folder()\n short_name = get_prefix_less_name(shape)\n\n for node in weight_file:\n if not weight_file[node]:\n continue\n cmds.deformerWeights(weight_file[node], im=True, shape=short_name,\n deformer=node, path=temp_path, method=method,\n vertexConnections=True)",
"def get_skincluster_info(skin_node):\n output = {\n 'joint_list': [],\n 'skin_method': 0,\n 'use_max_inf': False,\n 'max_inf_count': 4,\n }\n\n if skin_node:\n output['joint_list'] = skin_node.getInfluence()\n output['skin_method'] = skin_node.getSkinMethod()\n output['use_max_inf'] = skin_node.getObeyMaxInfluences()\n output['max_inf_count'] = skin_node.getMaximumInfluences()\n\n return output",
"def skinCluster(*args, addInfluence: Union[AnyStr, List[AnyStr]]=\"\", addToSelection: bool=True,\n after: bool=True, afterReference: bool=True, baseShape: Union[AnyStr,\n List[AnyStr]]=\"\", before: bool=True, bindMethod: Union[int, bool]=0,\n deformerTools: bool=True, dropoffRate: Union[float, bool]=0.0, exclusive:\n Union[AnyStr, bool]=\"\", forceNormalizeWeights: bool=True, frontOfChain:\n bool=True, geometry: Union[AnyStr, List[AnyStr], bool]=\"\", geometryIndices:\n bool=True, heatmapFalloff: float=0.0, ignoreBindPose: bool=True,\n ignoreHierarchy: bool=True, ignoreSelected: bool=True, includeHiddenSelections:\n bool=False, influence: Union[AnyStr, bool]=\"\", lockWeights: bool=True,\n maximumInfluences: Union[int, bool]=0, moveJointsMode: bool=True, name:\n AnyStr=\"\", normalizeWeights: Union[int, bool]=0, nurbsSamples: int=0,\n obeyMaxInfluences: bool=True, parallel: bool=True, polySmoothness: float=0.0,\n prune: bool=True, recacheBindMatrices: bool=True, remove: Union[bool,\n List[bool]]=True, removeFromSelection: bool=True, removeInfluence: Union[AnyStr,\n List[AnyStr]]=\"\", removeUnusedInfluence: bool=True, selectInfluenceVerts:\n AnyStr=\"\", skinMethod: Union[int, bool]=1, smoothWeights: float=0.0,\n smoothWeightsMaxIterations: int=2, split: bool=True, toSelectedBones: bool=True,\n toSkeletonAndTransforms: bool=True, unbind: bool=True, unbindKeepHistory:\n bool=True, useGeometry: bool=True, volumeBind: float=0.0, volumeType: int=0,\n weight: float=0.0, weightDistribution: Union[int, bool]=1, weightedInfluence:\n bool=True, q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr,\n Any]:\n pass",
"def getSkinnedMeshes(skin_clusters):\n skin_info = {}\n for skin_cluster in skin_clusters:\n joints = skin_cluster.influenceObjects()\n root_joint = getRootParent(joints[0])\n geometry = set(skin_cluster.getGeometry())\n skin_info[root_joint] = skin_info[root_joint] | geometry if root_joint in skin_info else geometry\n\n return skin_info",
"def create_skincluster_backup(shape, skin_node):\n\n logger.info(\"Creating skin backup for {}\".format(skin_node))\n\n # gets the skin cluster influences\n influences = cmds.listConnections(\"{}.matrix\".format(skin_node))\n\n # creates a duplicate shape of the given shape\n holder_name = \"{}_flex_skin_shape_holder\".format(\n get_prefix_less_name(shape))\n shape_duplicate = create_duplicate(shape, holder_name)\n\n # creates new skin cluster node on duplicate\n skin_holder = cmds.skinCluster(influences, shape_duplicate, bindMethod=0,\n obeyMaxInfluences=False, skinMethod=0,\n weightDistribution=0, normalizeWeights=1,\n removeUnusedInfluence=False, name=\"{}_SKN\"\n .format(holder_name))\n\n # copy the given skin node weights to back up shape\n copy_skin_weights(skin_node, skin_holder[0])\n\n return [\"{}\".format(skin_holder[0])]",
"def copy_skin_weights(source_skin, target_skin):\n\n # gets the shape back from the source_skin and target_skin\n # need to do this as providing the sourceSkin and destinationSkin arguments\n # to the copySkinWeights command does not update correctly the shapes\n\n source_shape = cmds.ls(cmds.listHistory(\"{}.outputGeometry\".format(\n source_skin), pdo=False, future=True), dag=True,\n noIntermediate=True)\n target_shape = cmds.ls(cmds.listHistory(\n \"{}.outputGeometry\".format(target_skin),\n pdo=False, future=True), dag=True,\n noIntermediate=True)\n\n # checks if source and target shapes list are bigger than 1\n if len(source_shape) > 1:\n source_shape = source_shape[0]\n if len(target_shape) > 1:\n target_shape = target_shape[0]\n\n cmds.select(source_shape, target_shape)\n\n # copy skin command\n cmds.copySkinWeights(surfaceAssociation=\"closestPoint\", noMirror=True,\n influenceAssociation=(\"label\",\n \"closestJoint\",\n \"oneToOne\"))\n\n # forces refresh\n cmds.refresh()",
"def getSkinCluster(_transform):\n result = []\n if not (pm.objExists(_transform)):\n return result\n validList = mel.eval('findRelatedDeformer(\"' + str(_transform) + '\")')\n if validList is None:\n return result\n for elem in validList:\n if pm.nodeType(elem) == 'skinCluster':\n result.append(elem)\n pm.select(result, r=True)\n result_node = pm.selected()\n \n if len(result_node) > 1:\n return result_node\n else:\n try:\n return result_node[0]\n except IndexError:\n return False",
"def createAndImport(cls, filePath=None, shape=None):\n\n if not shape:\n try:\n shape = cmds.ls(sl=1)[0]\n\n except:\n raise RuntimeError('No shape selected')\n\n if filePath == None:\n startDir = cmds.workspace(q=1, rootDirectory=1)\n filePath = cmds.fileDialog2(dialogStyle=2, fileMode=1, startingDirectory=startDir,\n fileFilter='Skin Files (*%s)' % SkinCluster.kFileExtension)\n\n if not filePath:\n return\n if not isinstance(filePath, basestring):\n filePath = filePath[0]\n\n # Read the data from the file\n fh = open(filePath, 'rb')\n data = pickle.load(fh)\n fh.close()\n\n # Make sure the vertex count is the same\n meshVertices = cmds.polyEvaluate(shape, vertex=1)\n\n importedVertices = len(data['blendWeights'])\n if meshVertices != importedVertices:\n raise RuntimeError('Vertex counts do not match. %d != %d' % (meshVertices, importedVertices))\n\n\n # check if the shape already has a skinCluster\n if SkinCluster.getSkinCluster(shape):\n skinCluster = SkinCluster(shape)\n else:\n # create a new skinCluster\n joints = data['weights'].keys()\n\n # Make sure all the joints exist\n\n unusedImports = []\n # Create a set for get which joint in the scene doesn't have weights\n noMatch = set([SkinCluster.removeNamespaceFromString(x) for x in cmds.ls(type='joint')])\n\n for j in joints:\n if j in noMatch:\n noMatch.remove(j)\n else:\n unusedImports.append(j)\n\n # Remapping the joints\n # if there were unmapped influences ask the user to map them\n if unusedImports and noMatch:\n\n mappingDialog = WeightRemapDialog(getMayaWindow())\n mappingDialog.setInfluences(unusedImports, noMatch)\n mappingDialog.exec_()\n\n for src, dst in mappingDialog.mapping.items():\n # swap the mapping\n data['weights'][dst] = data['weights'][src]\n del data['weights'][src]\n\n # Create the skinCluster with post normalization so setting the weights does not\n # normalize all weights\n joints = data['weights'].keys()\n\n skinCluster = cmds.skinCluster(joints, shape, tsb=1, nw=2, n=data['name'])\n skinCluster = SkinCluster(shape)\n\n skinCluster.setData(data)\n print \"Imported %s\" % filePath",
"def getSkinCluster(src):\n\n if cmds.nodeType(src) == \"skinCluster\":\n srcSkin = src\n else:\n srcSkin = mel.eval('findRelatedSkinCluster(\"' + src + '\")')\n\n return srcSkin",
"def b4Wan():\n \n tors, edges = tp.mesh_topo()\n G = build_graph(edges)\n \n # Get the routing path of all nodes\n table_file_name = '../outputs/mesh_routing_table.txt'\n table = all_routing(G, tors, table_file_name)\n if((os.path.isfile(table_file_name)) == False):\n table = all_routing(G, tors, table_file_name)\n else:\n json_data = open(table_file_name).read()\n table = json.loads(json_data)\n \n seeds, polys = cf.get_seeds_table(tors) #\n\n return G, tors, edges, table, seeds, polys",
"def initialise_weights(self): \n \n def initialise_process(param):\n \n \"\"\"\n Initialises weights of a given parameter following either Xavier or Kaiming uniform or normal processes.\n \n : param (torch.Tensor):\n \n \"\"\"\n \n if self._initialisation_process == 'xavier_uniform':\n tnni.xavier_uniform_(param.data)\n elif self._initialisation_process == 'xavier_normal':\n tnni.xavier_normal_(param.data)\n elif self._initialisation_process == 'kaiming_uniform':\n tnni.kaiming_uniform_(param.data)\n elif self._initialisation_process == 'kaiming_normal':\n tnni.kaiming_normal_(param.data)\n \n if self._initialisation_process is not None:\n for m in self.modules():\n # Embedding\n if type(m) is nn.Embedding:\n tnni.normal_(self.embedding.weight)\n # RNN\n elif type(m) in [nn.GRU, nn.LSTM, nn.RNN]: \n for name, param in m.named_parameters():\n if 'weight_ih' in name:\n initialise_process(param)\n #torch.nn.init.kaiming_normal_(param.data)\n elif 'weight_hh' in name:\n tnni.orthogonal_(param.data)\n elif 'bias' in name:\n # Bias initialised with zero will get the bias from\n # the forget gate\n param.data.fill_(0.0)\n param.data[self._hidden_size:self.directions*self._hidden_size].fill_(1.0)\n # Attention linear layer\n elif type(m) is nn.Linear:\n for name, param in m.named_parameters():\n if 'weight' in name:\n initialise_process(param.data)\n elif 'bias' in name:\n param.data.normal_()",
"def make_nodes(pyassimp_node):\n trs_keyframes = transform_keyframes.get(pyassimp_node.name, (None,))\n\n node = SkinningControlNode(*trs_keyframes, name=pyassimp_node.name,\n transform=pyassimp_node.transformation)\n nodes[pyassimp_node.name] = node, pyassimp_node\n node.add(*(make_nodes(child) for child in pyassimp_node.children))\n return node",
"def load_skinned(file):\n try:\n option = pyassimp.postprocess.aiProcessPreset_TargetRealtime_MaxQuality\n scene = pyassimp.load(file, option)\n except pyassimp.errors.AssimpError:\n #print('ERROR: pyassimp unable to load', file)\n return []\n\n # ----- load animations\n def conv(assimp_keys, ticks_per_second):\n \"\"\" Conversion from assimp key struct to our dict representation \"\"\"\n return {key.time / ticks_per_second: key.value for key in assimp_keys}\n\n # load first animation in scene file (could be a loop over all animations)\n transform_keyframes = {}\n if scene.animations:\n anim = scene.animations[0]\n for channel in anim.channels:\n # for each animation bone, store trs dict with {times: transforms}\n # (pyassimp name storage bug, bytes instead of str => convert it)\n transform_keyframes[channel.nodename.data.decode('utf-8')] = (\n conv(channel.positionkeys, anim.tickspersecond),\n conv(channel.rotationkeys, anim.tickspersecond),\n conv(channel.scalingkeys, anim.tickspersecond)\n )\n\n # Note: embedded textures not supported at the moment\n path = os.path.dirname(file)\n for mat in scene.materials:\n mat.tokens = dict(reversed(list(mat.properties.items())))\n if 'file' in mat.tokens: # texture file token\n tname = mat.tokens['file'].split('/')[-1].split('\\\\')[-1]\n # search texture in file's whole subdir since path often screwed up\n tname = [os.path.join(d[0], f) for d in os.walk(path) for f in d[2]\n if tname.startswith(f) or f.startswith(tname)]\n if tname:\n mat.texture = tname[0]\n else:\n print('Failed to find texture:', tname)\n\n # ---- prepare scene graph nodes\n # create SkinningControlNode for each assimp node.\n # node creation needs to happen first as SkinnedMeshes store an array of\n # these nodes that represent their bone transforms\n nodes = {} # nodes: string name -> node dictionary\n\n def make_nodes(pyassimp_node):\n \"\"\" Recursively builds nodes for our graph, matching pyassimp nodes \"\"\"\n trs_keyframes = transform_keyframes.get(pyassimp_node.name, (None,))\n\n node = SkinningControlNode(*trs_keyframes, name=pyassimp_node.name,\n transform=pyassimp_node.transformation)\n nodes[pyassimp_node.name] = node, pyassimp_node\n node.add(*(make_nodes(child) for child in pyassimp_node.children))\n return node\n\n root_node = make_nodes(scene.rootnode)\n\n # ---- create SkinnedMesh objects\n for mesh in scene.meshes:\n # -- skinned mesh: weights given per bone => convert per vertex for GPU\n # first, populate an array with MAX_BONES entries per vertex\n v_bone = np.array([[(0, 0)]*MAX_BONES] * mesh.vertices.shape[0],\n dtype=[('weight', 'f4'), ('id', 'u4')])\n for bone_id, bone in enumerate(mesh.bones[:MAX_BONES]):\n for entry in bone.weights: # weight,id pairs necessary for sorting\n v_bone[entry.vertexid][bone_id] = (entry.weight, bone_id)\n\n v_bone.sort(order='weight') # sort rows, high weights last\n v_bone = v_bone[:, -MAX_VERTEX_BONES:] # limit bone size, keep highest\n\n # prepare bone lookup array & offset matrix, indexed by bone index (id)\n bone_nodes = [nodes[bone.name][0] for bone in mesh.bones]\n bone_offsets = [bone.offsetmatrix for bone in mesh.bones]\n\n try :\n # Si les textures sont définies : corp principal du dinosaure\n texture = scene.materials[mesh.materialindex].texture\n # tex coords in raster order: compute 1 - y to follow OpenGL convention\n if mesh.texturecoords.size:\n tex_uv = np.array((0, 1) + mesh.texturecoords[0][:, :2] * (1, -1), dtype=np.float32)\n else:\n tex_uv = None\n\n tangents = []\n bitangents = []\n\n for face in 
mesh.faces:\n # Calcul des tangentes et bitangentes pour chaque face de la figure\n v0 = mesh.vertices[face[0]]\n v1 = mesh.vertices[face[1]]\n v2 = mesh.vertices[face[2]]\n\n uv0 = tex_uv[face[0]]\n uv1 = tex_uv[face[1]]\n uv2 = tex_uv[face[2]]\n\n deltaPos1 = [v1[i] - v0[i] for i in range(3)]\n deltaPos2 = [v2[i] - v0[i] for i in range(3)]\n\n deltaUV1 = [uv1[i] - uv0[i] for i in range(2)]\n deltaUV2 = [uv2[i] - uv0[i] for i in range(2)]\n\n r = 1 / ((deltaUV1[0]*deltaUV2[1]) - (deltaUV2[0]*deltaUV1[1]))\n tangent = [(deltaPos1[i]*deltaUV2[1])-(deltaPos2[i]*deltaUV1[1]) for i in range(3)]\n bitangent = [(deltaPos2[i]*deltaUV2[1])-(deltaPos1[i]*deltaUV1[1]) for i in range(3)]\n\n tangents.append(tangent)\n bitangents.append(bitangent)\n\n\n # initialize skinned mesh and store in pyassimp_mesh for node addition\n # ajout des coordonnées uv de texture, des tangentes et bitangentes\n mesh.skinned_mesh = SkinnedTextMesh(\n [mesh.vertices, mesh.normals, v_bone['id'], v_bone['weight'], tex_uv, tangents, bitangents],\n bone_nodes, bone_offsets, texture, mesh.faces\n )\n except AttributeError:\n # cas sans textures définies (dents du dinosaure)\n mesh.skinned_mesh = SkinnedMesh(\n [mesh.vertices, mesh.normals, v_bone['id'], v_bone['weight']],\n bone_nodes, bone_offsets, mesh.faces\n )\n\n\n # ------ add each mesh to its intended nodes as indicated by assimp\n for final_node, assimp_node in nodes.values():\n final_node.add(*(_mesh.skinned_mesh for _mesh in assimp_node.meshes))\n\n nb_triangles = sum((mesh.faces.shape[0] for mesh in scene.meshes))\n # print('Loaded', file, '\\t(%d meshes, %d faces, %d nodes, %d animations)' %\n # (len(scene.meshes), nb_triangles, len(nodes), len(scene.animations)))\n pyassimp.release(scene)\n return [root_node]",
"def transform(nodes, weights, new_corners):\n if nodes.shape[1] == 1:\n x_0 = new_corners[0, :]\n x_1 = new_corners[1, :]\n M = np.zeros((1, 1))\n M[:, 0] = 0.5 * (x_1 - x_0)\n origin = np.array([-1.0])\n elif nodes.shape[1] == 2:\n x_0 = new_corners[0, :]\n x_1 = new_corners[1, :]\n x_2 = new_corners[2, :]\n M = np.zeros((2, 2))\n M[:, 0] = 0.5 * (x_1 - x_0)\n M[:, 1] = 0.5 * (x_2 - x_0)\n origin = np.array([-1.0, -1.0])\n elif nodes.shape[1] == 3:\n x_0 = new_corners[0, :]\n x_1 = new_corners[1, :]\n x_2 = new_corners[2, :]\n x_3 = new_corners[3, :]\n M = np.zeros((3, 3))\n M[:, 0] = 0.5 * (x_1 - x_0)\n M[:, 1] = 0.5 * (x_2 - x_0)\n M[:, 2] = 0.5 * (x_3 - x_0)\n origin = np.array([-1.0, -1.0, -1.0])\n\n offset = -M @ origin + x_0\n volume_fraction = np.abs(np.linalg.det(M))\n return np.add(nodes @ M.T, offset), volume_fraction * weights",
"def metis(W, levels, rid=None):\n # Function written by M. Defferrard, taken verbatim, from \n # https://github.com/mdeff/cnn_graph/blob/master/lib/coarsening.py#L34\n\n N, N = W.shape\n if rid is None:\n rid = np.random.permutation(range(N))\n parents = []\n degree = W.sum(axis=0) - W.diagonal()\n graphs = []\n graphs.append(W)\n #supernode_size = np.ones(N)\n #nd_sz = [supernode_size]\n #count = 0\n\n #while N > maxsize:\n for _ in range(levels):\n\n #count += 1\n\n # CHOOSE THE WEIGHTS FOR THE PAIRING\n # weights = ones(N,1) # metis weights\n weights = degree # graclus weights\n # weights = supernode_size # other possibility\n weights = np.array(weights).squeeze()\n\n # PAIR THE VERTICES AND CONSTRUCT THE ROOT VECTOR\n idx_row, idx_col, val = scipy.sparse.find(W)\n perm = np.argsort(idx_row)\n rr = idx_row[perm]\n cc = idx_col[perm]\n vv = val[perm]\n cluster_id = metis_one_level(rr,cc,vv,rid,weights) # rr is ordered\n parents.append(cluster_id)\n\n # TO DO\n # COMPUTE THE SIZE OF THE SUPERNODES AND THEIR DEGREE \n #supernode_size = full( sparse(cluster_id, ones(N,1) ,\n #\tsupernode_size ) )\n #print(cluster_id)\n #print(supernode_size)\n #nd_sz{count+1}=supernode_size;\n\n # COMPUTE THE EDGES WEIGHTS FOR THE NEW GRAPH\n nrr = cluster_id[rr]\n ncc = cluster_id[cc]\n nvv = vv\n Nnew = cluster_id.max() + 1\n # CSR is more appropriate: row,val pairs appear multiple times\n W = scipy.sparse.csr_matrix((nvv,(nrr,ncc)), shape=(Nnew,Nnew))\n W.eliminate_zeros()\n # Add new graph to the list of all coarsened graphs\n graphs.append(W)\n N, N = W.shape\n\n # COMPUTE THE DEGREE (OMIT OR NOT SELF LOOPS)\n degree = W.sum(axis=0)\n #degree = W.sum(axis=0) - W.diagonal()\n\n # CHOOSE THE ORDER IN WHICH VERTICES WILL BE VISTED AT THE NEXT PASS\n #[~, rid]=sort(ss); # arthur strategy\n #[~, rid]=sort(supernode_size); # thomas strategy\n #rid=randperm(N); # metis/graclus strategy\n ss = np.array(W.sum(axis=0)).squeeze()\n rid = np.argsort(ss)\n\n return graphs, parents",
"def transfer_skincluster(source_object, target_objects, prune_after = False):\n source_skin_node = get_skincluster_node(source_object)\n assert source_skin_node, 'Skincluster not found in source object.'\n skin_info = get_skincluster_info(source_skin_node)\n joint_list = skin_info['joint_list']\n skin_method = skin_info['skin_method']\n for tgt_obj in target_objects:\n old_tgt_skin_node = get_skincluster_node(tgt_obj)\n if old_tgt_skin_node:\n old_tgt_skin_node.unbind()\n try:\n tgt_skin_node = pm.skinCluster(joint_list, tgt_obj, skinMethod = skin_method)\n except:\n tgt_skin_node = pm.skinCluster(joint_list, tgt_obj)\n pm.copySkinWeights(\n sourceSkin = source_skin_node,\n destinationSkin = tgt_skin_node,\n noMirror = True,\n surfaceAssociation = 'closestPoint',\n influenceAssociation = ['name', 'oneToOne', 'closestJoint'],\n )\n remove_unused_influence(tgt_skin_node)\n\n if prune_after:\n prune_skincluster(tgt_skin_node)",
"def assign_even_weights(vertex_circumference, upper_joint, lower_joint,\n reorder_vertices=[]):\n # Establish the shape node with the skin cluster\n vert_selection = cmds.ls(selection=True, shapes=True)\n if not vert_selection:\n vert_selection = cmds.listRelatives(children=True)\n # Skin cluster\n selection_skin = cmds.listConnections(vert_selection,\n source=True,\n exactType=True,\n type='skinCluster')[0]\n # Must use ONLY one shape value, otherwise it will do the sum of the list\n total_vertices = cmds.polyEvaluate(vert_selection[0], vertex=True)\n\n # Fractions for percent value assignments on vertices\n vertex_percent = 1 / (float(total_vertices) / float(vertex_circumference))\n inverse_percent = 1 - vertex_percent\n\n # Temp geo object for the mesh reordering (work with referenced files)\n temp_geo = cmds.duplicate(vert_selection)[0]\n\n # If no vertex values are given, a best guess will be made (not recommended)\n if not reorder_vertices:\n # 2018 specific command\n cmds.meshReorder(temp_geo + '.vtx[0]',\n temp_geo + '.vtx[1]',\n temp_geo + '.vtx[' + str(vertex_circumference) + ']')\n else:\n cmds.meshReorder(temp_geo + reorder_vertices[0],\n temp_geo + reorder_vertices[1],\n temp_geo + reorder_vertices[2])\n skin = cmds.skinCluster(temp_geo, upper_joint, lower_joint, tsb=True)[0]\n\n # Dictionary for returned keys and values if debugging needed later\n percent_list = {}\n # Mutable percent variable\n new_percent = vertex_percent\n index = 0\n index_end = vertex_circumference - 1\n while index <= total_vertices:\n verts = '%s.vtx[%s:%s]' % (temp_geo, str(index), str(index_end))\n cmds.skinPercent(skin, verts,\n transformValue=[(lower_joint, inverse_percent),\n (upper_joint, new_percent)])\n percent_list[verts] = [new_percent, inverse_percent]\n\n new_percent = new_percent + vertex_percent\n inverse_percent = 1 - new_percent\n\n index_end = index_end + vertex_circumference\n index = index + vertex_circumference\n\n if not cmds.objExists(verts):\n cmds.warning('Function is evaluating more vertices than exist! '\n 'Returned weights may not be correct.')\n break\n\n # Copy new weights to original geo; Mesh reorder no longer required\n cmds.copySkinWeights(sourceSkin=skin, destinationSkin=selection_skin,\n noMirror=True)\n cmds.delete(temp_geo)\n return percent_list",
"def get_skincluster_node(input_object):\n history_list = input_object.listHistory(pruneDagObjects = True, interestLevel = True)\n skin_node = None\n for o in history_list:\n if o.nodeType() == 'skinCluster':\n skin_node = o\n\n return skin_node",
"def from_dict(self, handler_dict):\n nb_clusters = handler_dict['nb_clusters']\n while len(self.clusters) < nb_clusters:\n self.clusters.append(C.Cluster())\n\n # Weights\n weight_dict = handler_dict['weights']\n if weight_dict is not None:\n # Network batch\n net = weight_dict['net']\n if net is not None:\n repr_dict = net['repr']\n repr = R.construct_repr(repr_dict)\n\n pert_list = net['perturb']\n perturbs = []\n if pert_list is not None:\n for pert_dict in pert_list:\n pert = P.construct_pert(pert_dict)\n perturbs.append(pert)\n\n for param in list(self.net.named_parameters()):\n tensor_name = param[0]\n self.tensor_info[tensor_name] = (param[1], perturbs, repr)\n\n # Modules batch\n modules = weight_dict['modules']\n if modules is not None:\n for module in modules:\n module_name = module['name']\n\n repr_dict = module['repr']\n repr = R.construct_repr(repr_dict)\n\n pert_list = module['perturb']\n perturbs = []\n if pert_list is not None:\n for pert_dict in pert_list:\n pert = P.construct_pert(pert_dict)\n perturbs.append(pert)\n else:\n perturbs=None\n \n current_mod = dict(self.net.named_modules())[module_name]\n for param_key in dict(current_mod.named_parameters()):\n full_key = module_name + '.' + param_key\n tens = dict(current_mod.named_parameters())[param_key]\n self.tensor_info[full_key] = (tens, perturbs, repr)\n\n # Tensors\n tensors = weight_dict['tensors']\n if tensors is not None:\n for tensor in tensors:\n tensor_name = tensor['name']\n\n repr_dict = tensor['repr']\n repr = R.construct_repr(repr_dict)\n\n pert_list = tensor['perturb']\n perturbs = []\n if pert_list is not None:\n for pert_dict in pert_list:\n pert = P.construct_pert(pert_dict)\n perturbs.append(pert)\n else:\n perturbs=None\n\n tens = dict(self.net.named_parameters())[tensor_name]\n self.tensor_info[tensor_name] = (tens, perturbs, repr)\n\n # Activations\n acti_dict = handler_dict['activations']\n if acti_dict is not None:\n # Network batch\n net = acti_dict['net']\n if net is not None:\n repr_dict = net['repr']\n repr = R.construct_repr(repr_dict)\n\n pert_list = net['perturb']\n perturbs = []\n for pert_dict in pert_list:\n pert = P.construct_pert(pert_dict)\n perturbs.append(pert)\n\n for name, module in self.net.named_modules():\n hook = Hook(perturbs, repr)\n self.hooks[name] = module.register_forward_hook(\n hook.hook_fn)\n self.acti_info[name] = (perturbs, repr)\n\n # Modules batch\n modules = acti_dict['modules']\n if modules is not None:\n for module in modules:\n module_name = module['name']\n\n repr_dict = module['repr']\n repr = R.construct_repr(repr_dict)\n\n pert_list = module['perturb']\n perturbs = []\n for pert_dict in pert_list:\n pert = P.construct_pert(pert_dict)\n perturbs.append(pert)\n\n current_mod = dict(self.net.named_modules())[module_name]\n hook = Hook(perturbs, repr)\n self.hooks[module_name] = current_mod.register_forward_hook(\n hook.hook_fn)\n self.acti_info[module_name] = (perturbs, repr)\n\n # Cluster assignement\n self.assign_clusters()",
"def init_weights(model):\n ...",
"def get_weights(self):",
"def DontuseThis():\n BCM_outputs = ['phi','rho','theta',\n 'r_probabilityMaps','l_probabilityMaps',\n 'models']\n BCM_Models = pe.Node(interface=nio.DataGrabber(input_names=['structures'],\n outfields=BCM_outputs),\n name='10_BCM_Models')\n BCM_Models.inputs.base_directory = atlas_fname_wpath\n BCM_Models.inputs.template_args['phi'] = [['spatialImages','phi','nii.gz']]\n BCM_Models.inputs.template_args['rho'] = [['spatialImages','rho','nii.gz']]\n BCM_Models.inputs.template_args['theta'] = [['spatialImages','theta','nii.gz']]\n BCM_Models.inputs.template_args['r_probabilityMaps'] = [['structures']]\n BCM_Models.inputs.template_args['l_probabilityMaps'] = [['structures']]\n BCM_Models.inputs.template_args['models'] = [['structures']]\n\n BRAINSCut_structures = ['caudate','thalamus','putamen','hippocampus']\n #BRAINSCut_structures = ['caudate','thalamus']\n BCM_Models.iterables = ( 'structures', BRAINSCut_structures )\n BCM_Models.inputs.template = '%s/%s.%s'\n BCM_Models.inputs.field_template = dict(\n r_probabilityMaps='probabilityMaps/r_%s_ProbabilityMap.nii.gz',\n l_probabilityMaps='probabilityMaps/l_%s_ProbabilityMap.nii.gz',\n models='modelFiles/%sModel*',\n )\n\n \"\"\"\n The xml creation and BRAINSCut need to be their own mini-pipeline that gets\n executed once for each of the structures in BRAINSCut_structures. This can be\n accomplished with a map node and a new pipeline.\n \"\"\"\n \"\"\"\n Create xml file for BRAINSCut\n \"\"\"\n\n\n BFitAtlasToSubject = pe.Node(interface=BRAINSFit(),name=\"BFitAtlasToSubject\")\n BFitAtlasToSubject.inputs.costMetric=\"MMI\"\n BFitAtlasToSubject.inputs.maskProcessingMode=\"ROI\"\n BFitAtlasToSubject.inputs.numberOfSamples=100000\n BFitAtlasToSubject.inputs.numberOfIterations=[1500,1500]\n BFitAtlasToSubject.inputs.numberOfHistogramBins=50\n BFitAtlasToSubject.inputs.maximumStepLength=0.2\n BFitAtlasToSubject.inputs.minimumStepLength=[0.005,0.005]\n BFitAtlasToSubject.inputs.transformType= [\"Affine\",\"BSpline\"]\n BFitAtlasToSubject.inputs.maxBSplineDisplacement= 7\n BFitAtlasToSubject.inputs.maskInferiorCutOffFromCenter=65\n BFitAtlasToSubject.inputs.splineGridSize=[28,20,24]\n BFitAtlasToSubject.inputs.outputVolume=\"Trial_Initializer_Output.nii.gz\"\n BFitAtlasToSubject.inputs.outputTransform=\"Trial_Initializer_Output.mat\"\n cutWF.connect(SplitAvgBABC,'avgBABCT1',BFitAtlasToSubject,'fixedVolume')\n cutWF.connect(BABC,'outputLabels',BFitAtlasToSubject,'fixedBinaryVolume')\n cutWF.connect(BAtlas,'template_t1',BFitAtlasToSubject,'movingVolume')\n cutWF.connect(BAtlas,'template_brain',BFitAtlasToSubject,'movingBinaryVolume')\n cutWF.connect(BLI,'outputTransformFilename',BFitAtlasToSubject,'initialTransform')\n\n CreateBRAINSCutXML = pe.Node(Function(input_names=['rho','phi','theta',\n 'model',\n 'r_probabilityMap',\n 'l_probabilityMap',\n 'atlasT1','atlasBrain',\n 'subjT1','subjT2',\n 'subjT1GAD','subjT2GAD',\n 'subjSGGAD','subjBrain',\n 'atlasToSubj','output_dir'],\n output_names=['xml_filename','rl_structure_filename_list'],\n function = create_BRAINSCut_XML),\n overwrite = True,\n name=\"CreateBRAINSCutXML\")\n\n ## HACK Makde better directory\n CreateBRAINSCutXML.inputs.output_dir = \".\" #os.path.join(cutWF.base_dir, \"BRAINSCut_output\")\n cutWF.connect(BCM_Models,'models',CreateBRAINSCutXML,'model')\n cutWF.connect(BCM_Models,'rho',CreateBRAINSCutXML,'rho')\n cutWF.connect(BCM_Models,'phi',CreateBRAINSCutXML,'phi')\n cutWF.connect(BCM_Models,'theta',CreateBRAINSCutXML,'theta')\n 
cutWF.connect(BCM_Models,'r_probabilityMaps',CreateBRAINSCutXML,'r_probabilityMap')\n cutWF.connect(BCM_Models,'l_probabilityMaps',CreateBRAINSCutXML,'l_probabilityMap')\n cutWF.connect(BAtlas,'template_t1',CreateBRAINSCutXML,'atlasT1')\n cutWF.connect(BAtlas,'template_brain',CreateBRAINSCutXML,'atlasBrain')\n cutWF.connect(SplitAvgBABC,'avgBABCT1',CreateBRAINSCutXML,'subjT1')\n cutWF.connect(SplitAvgBABC,'avgBABCT2',CreateBRAINSCutXML,'subjT2')\n cutWF.connect(GADT1,'outputVolume',CreateBRAINSCutXML,'subjT1GAD')\n cutWF.connect(GADT2,'outputVolume',CreateBRAINSCutXML,'subjT2GAD')\n cutWF.connect(SGI,'outputFileName',CreateBRAINSCutXML,'subjSGGAD')\n cutWF.connect(BABC,'outputLabels',CreateBRAINSCutXML,'subjBrain')\n cutWF.connect(BFitAtlasToSubject,'outputTransform',CreateBRAINSCutXML,'atlasToSubj')\n #CreateBRAINSCutXML.inputs.atlasToSubj = \"INTERNAL_REGISTER.mat\"\n #cutWF.connect(BABC,'atlasToSubjectTransform',CreateBRAINSCutXML,'atlasToSubj')\n\n \"\"\"\n ResampleNACLabels\n \"\"\"\n ResampleAtlasNACLabels=pe.Node(interface=BRAINSResample(),name=\"ResampleAtlasNACLabels\")\n ResampleAtlasNACLabels.inputs.interpolationMode = \"NearestNeighbor\"\n ResampleAtlasNACLabels.inputs.outputVolume = \"atlasToSubjectNACLabels.nii.gz\"\n\n cutWF.connect(cutWF,'OutputSpec.atlasToSubjectTransform',ResampleAtlasNACLabels,'warpTransform')\n cutWF.connect(cutWF,'OutputSpec.t1_corrected',ResampleAtlasNACLabels,'referenceVolume')\n cutWF.connect(BAtlas,'template_nac_lables',ResampleAtlasNACLabels,'inputVolume')\n\n \"\"\"\n BRAINSMush\n \"\"\"\n BMUSH=pe.Node(interface=BRAINSMush(),name=\"BMUSH\")\n BMUSH.inputs.outputVolume = \"MushImage.nii.gz\"\n BMUSH.inputs.outputMask = \"MushMask.nii.gz\"\n BMUSH.inputs.lowerThresholdFactor = 1.2\n BMUSH.inputs.upperThresholdFactor = 0.55\n\n cutWF.connect(myLocalTCWF,'OutputSpec.t1_corrected',BMUSH,'inputFirstVolume')\n cutWF.connect(myLocalTCWF,'OutputSpec.t2_corrected',BMUSH,'inputSecondVolume')\n cutWF.connect(myLocalTCWF,'OutputSpec.outputLabels',BMUSH,'inputMaskVolume')\n\n \"\"\"\n BRAINSROIAuto\n \"\"\"\n BROI = pe.Node(interface=BRAINSROIAuto(), name=\"BRAINSROIAuto\")\n BROI.inputs.closingSize=12\n BROI.inputs.otsuPercentileThreshold=0.01\n BROI.inputs.thresholdCorrectionFactor=1.0\n BROI.inputs.outputROIMaskVolume = \"temproiAuto_t1_ACPC_corrected_BRAINSABC.nii.gz\"\n cutWF.connect(myLocalTCWF,'OutputSpec.t1_corrected',BROI,'inputVolume')\n\n \"\"\"\n Split the implicit outputs of BABCext\n \"\"\"\n SplitAvgBABC = pe.Node(Function(input_names=['in_files','T1_count'], output_names=['avgBABCT1','avgBABCT2'],\n function = get_first_T1_and_T2), run_without_submitting=True, name=\"99_SplitAvgBABC\")\n SplitAvgBABC.inputs.T1_count = 1 ## There is only 1 average T1 image.\n\n cutWF.connect(myLocalTCWF,'OutputSpec.outputAverageImages',SplitAvgBABC,'in_files')\n\n\n\n def printFullPath(outFileFullPath):\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"{0}\".format(outFileFullPath))\n return outFileFullPath\n printOutImage = pe.Node( Function(function=printFullPath, input_names = ['outFileFullPath'], output_names = ['genoutFileFullPath']), run_without_submitting=True, name=\"99_printOutImage\")\n cutWF.connect( GADT2, 'outputVolume', printOutImage, 'outFileFullPath' )",
"def create_clusters_backup(shape, nodes):\n\n logger.info(\"Creating cluster weights backup for {}\".format(nodes))\n\n # gets the temp folder path\n temp_path = get_temp_folder()\n\n # prefix less shape name\n shape = get_prefix_less_name(shape)\n\n # dict for weights files\n weight_files = {}\n\n for node in nodes:\n # If there is not weights creating the deformer maps is useless\n try:\n cmds.getAttr(\"{}.weightList[0].weights\".format(node))\n except RuntimeError:\n weight_files[node] = None\n continue\n # Creates the weight map if weights are found on shape points\n cmds.deformerWeights('{}_{}.xml'.format(shape, node), export=True,\n vertexConnections=True, weightPrecision=5,\n shape=shape, deformer=node, path=temp_path)\n weight_files[node] = '{}_{}.xml'.format(shape, node)\n\n return weight_files",
"def all_simplices_node_weights(self, indices, simplex_weights, n_dim):\n #if 'ANS' in self.__class__.__name__:\n # print(os.getpid(), \"zzz\", indices.shape, n_dim)\n\n all_simplices = all_neighbor_simplices_real_idx(n_dim, indices)\n\n #if 'ANS' in self.__class__.__name__:\n # print(os.getpid(), \"ggg\", indices.shape)\n\n if simplex_weights is not None:\n all_simplices_nidx = base_idx_neighbor_idx_simplices(n_base=indices.shape[0],\n n_neighbors=indices.shape[1],\n n_dim=n_dim)\n base_indices = all_simplices_nidx[:,0]\n neighbor_indices = all_simplices_nidx[:,1:]\n #print(os.getpid(), \"hhh\", neighbor_indices.shape)\n node_weights = array_array_index(simplex_weights[base_indices], neighbor_indices)\n node_weights = np.prod(node_weights, axis=1)\n else:\n node_weights = np.repeat(1.0, all_simplices.shape[0])\n\n return all_simplices, node_weights",
"def __init__(self, weights, path, trained, asGraph):\n \n _weights = np.asarray(weights)\n\n numLayers = int(_weights.shape[0]/2)\n wghts = []\n biases = []\n\n for i in range(numLayers):\n j = 2*i\n# print(j,(_weights[j].T).shape)\n wghts.append(_weights[j])\n j = 2*i + 1\n# print(j,(_weights[j].T).shape)\n biases.append(_weights[j])\n #enddo\n\n self.numLayers = numLayers\n self.wghts = np.asarray(wghts)\n self.asGraph = asGraph\n self.wghts = wghts\n self.path = path\n self.trained = trained",
"def get_weights(self, nn_weights, rov_id): # Get weights from CCEA population\n\n for w in range(self.n_weights):\n self.weights[rov_id, w] = nn_weights[w]",
"def load_opcodes_subnetwork_pretrained_weights(self, model):\n print(\"ToImplement\")",
"def init_weights(self, pretrained=None):\n super(SingleTwoStageDetector176PRW, self).init_weights(pretrained)\n self.backbone.init_weights(pretrained=pretrained)\n if self.with_neck:\n if isinstance(self.neck, nn.Sequential):\n for m in self.neck:\n m.init_weights()\n else:\n self.neck.init_weights()\n if self.with_rpn:\n self.rpn_head.init_weights()\n if self.with_roi_head:\n self.roi_head.init_weights(pretrained)\n self.bbox_head.init_weights()",
"def create_weight_matrices(self):\n rad = 1 / np.sqrt(self.no_of_in_nodes)\n X = truncated_normal(mean=0, \n sd=1, \n low=-rad, \n upp=rad)\n self.wih = X.rvs((self.no_of_hidden_nodes, \n self.no_of_in_nodes))\n rad = 1 / np.sqrt(self.no_of_hidden_nodes)\n X = truncated_normal(mean=0, sd=1, low=-rad, upp=rad)\n self.who = X.rvs((self.no_of_out_nodes, \n self.no_of_hidden_nodes))",
"def init_weights(self) -> None:\n nn.init.kaiming_normal_(self._U)\n nn.init.kaiming_normal_(self._W)\n nn.init.kaiming_normal_(self._V)\n\n nn.init.normal_(self._b)"
] | [
"0.60992557",
"0.59548813",
"0.5897242",
"0.58499587",
"0.5831983",
"0.57551503",
"0.57265556",
"0.5725397",
"0.5677713",
"0.54353786",
"0.5421846",
"0.5350296",
"0.5301341",
"0.5252528",
"0.5239307",
"0.52385575",
"0.52209234",
"0.5210054",
"0.52021974",
"0.51798564",
"0.51774186",
"0.5136345",
"0.5134267",
"0.51132727",
"0.5112217",
"0.5100301",
"0.50789917",
"0.5077726",
"0.5077443",
"0.5061176"
] | 0.72898114 | 0 |
Changes the value of var to value in the target card JSON (target) | def doEdit(var, value, target):
currentValue = target.get(var, "")
newValue = Simplifier.simplify(str(value).replace(f"{{{var}}}", str(currentValue)))
target[var] = newValue | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_var(self,variable,value):\n self.template=self.template.replace(\"@{}@\".format(variable),value)",
"def updateValue(self):\n self.value = self.var.get()",
"def set_variable(self, request, context):\n response = SetVariableResponse()\n value = decode(request.value)\n self._delegator.set_variable(request.component, request.variable, value)\n return response",
"def __setattr__(self, key, value):\n if key != 'json_data':\n self.get_data()[key] = value\n else:\n super(BaseJsonEncodableObject, self).__setattr__(key, value)",
"def variable(self, val):",
"def update_variable(value):\n return value",
"def set_card (self, card):\n\t\tif ((card == 1) or (card == 2)):\n\t\t\tself._card = card\n\t\telse:\n\t\t\tsys.stderr.write(\"\\nERROR : %s card number must be 1 or 2 so it can't be %s !\\n\" % (self._target_id, card))\n\t\t\tsys.exit(1)",
"def set_value ( self, object, value ):\n target, name = self.target_name( object )\n setattr( target, name, value )",
"def bcp_player_variable(self, name, value, prev_value, change, **kwargs):\n\n if self.player:\n self.player[name] = value",
"def set_custom_variable(self, key, value):\n self.logger.info(\"Set custom variable : %s:%s\" % (key, value))\n\n try:\n if 'custom_variables' not in self._answer_payload:\n self._answer_payload['custom_variables'] = {}\n self._answer_payload['custom_variables'][key] = value\n except Exception as e:\n self.logger.error(\"Error on set custom variables : %s\" % e)",
"def set_custom_value(self, value):\n self.logger.info(\"Set custom value : %s\" % value)\n\n try:\n self._answer_payload['custom_value'] = value\n except Exception as e:\n self.logger.error(\"Error on set custom variables : %s\" % e)",
"def set(self,obj,value):\r\n\t\tvalue = self.parse(value)\r\n\t\tsetattr(obj,self.name,value)",
"def set_value(self, target, value):\n\n\t\t# Type checking.\n\t\tvar_type = self.variables[target[0]]\n\t\tif var_type == 'acq_marker':\n\t\t\tif len(target) == 1:\n\t\t\t\traise TypeError('Must assign dictionary to acq_marker')\n\t\t\telse:\n\t\t\t\tif target[1] == 'marker_num':\n\t\t\t\t\tif not isinstance(value, int):\n\t\t\t\t\t\traise TypeError('Must assign int to acq_marker num')\n\t\t\t\telif target[1] == 'output':\n\t\t\t\t\tif not isinstance(value, basestring):\n\t\t\t\t\t\traise TypeError('Must assign string to acq_marker num')\n\t\telif var_type == 'delay':\n\t\t\ttry:\n\t\t\t\tvalue.assert_dimensions('s')\n\t\t\texcept (AttributeError, IncompatibleDimensions):\n\t\t\t\traise TypeError('Must assign time quantity to delay')\n\t\telif var_type == 'int':\n\t\t\tif not isinstance(value, int):\n\t\t\t\traise TypeError('Must assign integer to int')\n\t\telif var_type == 'pulse':\n\t\t\tif len(target) == 1:\n\t\t\t\traise TypeError('Must assign dictionary to pulse')\n\t\t\telse:\n\t\t\t\tif target[1] == 'amplitude':\n\t\t\t\t\ttry:\n\t\t\t\t\t\tvalue.assert_dimensions('V')\n\t\t\t\t\texcept (AttributeError, IncompatibleDimensions):\n\t\t\t\t\t\traise TypeError('Must assign voltage quantity to pulse amplitude')\n\t\t\t\telif target[1] == 'length':\n\t\t\t\t\ttry:\n\t\t\t\t\t\tvalue.assert_dimensions('s')\n\t\t\t\t\texcept (AttributeError, IncompatibleDimensions):\n\t\t\t\t\t\traise TypeError('Must assign time quantity to pulse length')\n\t\t\t\telif target[1] == 'shape':\n\t\t\t\t\tif not isinstance(value, basestring):\n\t\t\t\t\t\traise TypeError('Must assign string to pulse shape')\n\t\telse:\n\t\t\traise TypeError('Cannot assign to variable of type \"{0}\"'.format(var_type))\n\n\t\tif target in self.values and self.values[target] is not None:\n\t\t\traise TypeError('Re-assignment of {0}'.format(target))\n\t\telse:\n\t\t\tself.values[target] = value",
"def change_var(self, var):\n return _coconut_tail_call(self.__class__, var, self.elem.substitute({self.var: var}))",
"def _set_param(self, name, value):\n self._frozenjson._data[name] = value",
"def test_edit_and_write_rapid_data_correct(self):\n got_var, var_jtar = rapid_datatypes.get_rapid_data(self.controller, 'T_ROB1', 'MainModule', 'var_jtarget')\n if not got_var:\n print 'Couldn\\'t get variable. Test will not run.'\n sys.exit()\n _ = rapid_jointtarget.edit_and_write_rapid_data(var_jtar, '[0,0,0,0,0,0]', '[0,0,0,0,0,0]')\n self.assertEqual(var_jtar.Value.ToString(), '[[0,0,0,0,0,0],[0,0,0,0,0,0]]')",
"def value(self, value):\n self.set_data(value)",
"def changeValue(situation , valueToPlay, player):\r\n situation[valueToPlay[0]][valueToPlay[1]] = Player.get_spec(player)\r\n return situation",
"def assign(self, var, value):\n\t\tself._root = self._insert(self._root, var, value)",
"def updateVar(self, id, value, type_):\n if id in self.variables:\n symbol = self.variables[id]\n symbol = sym.Symbol(id, value, type_, symbol.row, symbol.column)\n self.variables[id] = symbol\n return True",
"def editDataVariable(self, product, data_variable):\r\n return data_variable",
"def assign(self, value):\n self.value = value",
"def convert_json(option, _, value, parser):\n setattr(parser.values, option.dest, json.loads(value))",
"def turn_on(self, **kwargs):\n setattr(self.resource, self.variable, True)",
"def setValue(self,variable,value):\n for adjective_key in value:\n variable.adjectives[adjective_key].membership = value[adjective_key]\n return None",
"def setLocal(name, value):",
"def set_data(node, value):\n node['data'] = value",
"def assign(self, V, py):\n V.value = py",
"def assign(self, V, py):\n V.value = py",
"def assign(self, V, py):\n V.value = py"
] | [
"0.5401657",
"0.53921354",
"0.5373613",
"0.53605235",
"0.5335546",
"0.5331462",
"0.5317318",
"0.5298089",
"0.5265769",
"0.5262278",
"0.52587116",
"0.5233701",
"0.520725",
"0.514586",
"0.51372725",
"0.50887924",
"0.5060526",
"0.5028422",
"0.50151414",
"0.5007186",
"0.49935517",
"0.4990905",
"0.49742275",
"0.4961379",
"0.4955869",
"0.49422944",
"0.4920466",
"0.4910594",
"0.4910594",
"0.4910594"
] | 0.5842602 | 0 |
Return a numeric PIN with length digits | def get_pin(length=6):
pin = str(random.sample(range(10 ** (length - 1), 10 ** length), 1)[0])
print("pin " + pin)
return pin | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __generate_pin(cls) -> str:\n return str(randbelow(10 ** cls.PIN_DIGITS)).zfill(cls.PIN_DIGITS)",
"def get_pin_digits(pin):\n digits = []\n for i in range(1, 5):\n digit = pin % 10\n pin = int(pin / 10)\n digits = [digit] + digits\n return digits",
"def number2patten(number, length):\n if length == 1:\n return NUMBER_TO_BASE[number]\n prefix_index = number // 4\n base = NUMBER_TO_BASE[number % 4]\n return number2patten(prefix_index, length - 1) + base",
"def generateNumericTokenOfLength(length: int) -> str:\n return \"\".join([r.choice(string.digits) for _ in range(length)])",
"def genAlphaNumPwd(length):\n return genPwd(string.ascii_letters + string.digits, length)",
"def safe_number(self):\n mask = '*' * (len(self.account_number) - 4)\n return '{0}{1}'.format(mask, self.account_number[-4:])",
"def create_phone_number(n):",
"def generate_password(length=20):\r\n # type: (int) -> str\r\n return ('%0'+str(length)+'x') % random.randrange(16 ** length)",
"def safe_number(self):\n mask = '*' * (len(self.card_number) - 4)\n return '{0}{1}'.format(mask, self.card_number[-4:])",
"def genRingSetting(self):\n num = random.randrange(0,25) #Generates a random number from 0 to 25\n if num < 10: #If the number is a single digit\n num = str(num) #Turn it into a string\n num = '0' + num #Add a 0 before it\n return str(num) #Return the string of the number to the user in double digit format",
"def format(id, length=5):\n return str(bin(id))[2:] if len(str(int(id))[2:])>4 else (5-len(str(bin(id))[2:]))*\"0\"+str(bin(id))[2:]",
"def genpass(length):\n password = \"\"\n choice = string.ascii_letters + string.digits\n for i in range(length):\n password += random.choice(choice)\n return password",
"def num_string(length):\n import math\n base = 10\n lines = []\n for ndigit in range(int(math.log10(length)) + 1):\n interval = int(math.pow(base, ndigit))\n lines.append(''.join(\n (str(n % base) + (' ' * (interval-1)))\n for n in range((length - 1) // interval + 1)))\n return '\\n'.join(lines)",
"def gtin_pad(gtin):\n zero_space = 11 - len(gtin)\n gtin = '%s%s' % ('0'*zero_space, gtin)\n if len(gtin) == 11:\n gtin = '%s%s' % (gtin, gtin_checksum(gtin))\n return gtin",
"def password_numerical(i):\r\n\r\n return ''.join(_random.choice(string.digits) for x in range(i))",
"def get_random_string(length):\n return \"{0:0{1}x}\".format(random.getrandbits(length * 4), length)",
"def int_to_address(n, length):\n return \"{0:b}\".format(n).zfill(length)",
"def completed_number(prefix, length):\n\n\tccnumber = prefix\n\t# generate digits\n\twhile len(ccnumber) < (length - 1):\n\t\tdigit = str(generator.choice(range(0, 10)))\n\t\tccnumber.append(digit)\n\t# Calculate sum\n\n\tsum = 0\n\tpos = 0\n\n\treversedCCnumber = []\n\treversedCCnumber.extend(ccnumber)\n\treversedCCnumber.reverse()\n\n\twhile pos < length - 1:\n\n\t\todd = int(reversedCCnumber[pos]) * 2\n\t\tif odd > 9:\n\t\t\todd -= 9\n\n\t\tsum += odd\n\n\t\tif pos != (length - 2):\n\t\t\tsum += int(reversedCCnumber[pos + 1])\n\n\t\tpos += 2\n\n\t# Calculate check digit\n\n\tcheckdigit = ((sum / 10 + 1) * 10 - sum) % 10\n\tccnumber.append(str(checkdigit))\n\treturn ''.join(ccnumber)",
"def __padlen(self,l):\n return Utils.padlen(l,self.com.granularity)",
"def __str__(self):\n return textwrap.fill('{:064b}'.format(self.num), 8)",
"def str_mask():\n mask_input = input(\"Provide a long number. \")\n mask_length = len(mask_input)\n last4 = mask_input[-4:]\n mask = multiply(mask_length - 4, \"#\")\n masked_str = mask + last4\n print(masked_str)",
"def generate_password(self, length):\n items = [\"a\", \"e\", \"i\", \"o\", \"u\", \"1\", \"2\", \"4\", \"5\", \"7\", \"8\", \"9\"]\n\n new_password = \"\"\n while(len(new_password) < length):\n item = items[randint(0, len(items) - 1)]\n new_password += item\n return new_password",
"def createAccountNo():\n account_no = \"\"\n for i in range(8):\n account_no += str(randint(0, 9))\n return int(account_no)",
"def padded_area_code(phone_number):\r\n area_code = grab_area_code(phone_number)\r\n return area_code + \"*******\"",
"def evalute_number(dialed):\n if (len(dialed) == 11 or len(dialed) == 10) and str(dialed).startswith(\"0\"):\n # UK Number\n return \"+44%s\" % (dialed[1:])\n elif len(dialed) == 6:\n # Local Fishguard numbers\n return \"+441348%s\" % (dialed)\n return None",
"def patten2number(sequence):\n try:\n if len(sequence) == 0:\n return 0\n last_base = sequence[-1]\n prefix = sequence[:-1]\n return 4 * patten2number(prefix) + BASE_TO_NUMBER[last_base]\n except KeyError:\n raise ValueError('Not able to convert nucleotide: %s' % last_base)",
"def password_generator(password_lenght):\r\n password = \"\"\r\n\r\n try:\r\n if password_lenght >=1:\r\n for i in range(password_lenght):\r\n choice = random.choice(symbols)\r\n password += str(choice)\r\n print(f\"Your password is: {password} \\nTnank you!\")\r\n return password\r\n else:\r\n return 0\r\n except Exception:\r\n pass",
"def pad(number, width=0):\n return str(number).zfill(width)",
"def pin(self) -> int:",
"def random_number(length=6):\n return randint(10**(length-1), (10**(length)-1))"
] | [
"0.74236387",
"0.70152956",
"0.6587809",
"0.64308965",
"0.62868375",
"0.6269005",
"0.6230067",
"0.6148351",
"0.6140739",
"0.6132022",
"0.6079048",
"0.6036217",
"0.59997034",
"0.5981399",
"0.59480107",
"0.5926849",
"0.5907118",
"0.58957",
"0.5892246",
"0.58810383",
"0.587727",
"0.58467865",
"0.58465886",
"0.5784075",
"0.57677543",
"0.5763357",
"0.57592607",
"0.57554823",
"0.5751884",
"0.5722338"
] | 0.76024115 | 0 |
Fixture for DataFrame of ints which are constant per column | def int_frame_const_col():
df = DataFrame(
np.tile(np.arange(3, dtype="int64"), 6).reshape(6, -1) + 1,
columns=["A", "B", "C"],
)
return df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _to_constant_df(self, num):\n if isinstance(num, pd.DataFrame):\n# pdb.set_trace()\n return num\n else:\n return self.data['ones'].copy() * num",
"def test_df():\n return pd.DataFrame({\n 'intcol': [1, 2, 3],\n 'strcol': ['four', 'five', 'six'],\n 'floatcol': [7.0, 8.0, 9.0]\n })",
"def create_dummies(df):",
"def get_cols_dummy():",
"def df_numeric_column(min_value=0, max_value=1, num_rows=100):\n # Generate numeric column\n return pd.Series(np.random.uniform(min_value, max_value, num_rows))",
"def test_column_index(self):\n c = Column('foo', range(3))\n self.assertEqual(c[0], 0)\n self.assertEqual(c[1], 1)\n self.assertEqual(c[2], 2)",
"def test_column_data(self):\n c = Column('foo', range(3))\n self.assertEqual(list(c), [0, 1, 2])",
"def test_sample_constant_column(self):\n # Setup\n instance = GaussianMultivariate()\n X = np.array([\n [1.0, 2.0],\n [1.0, 3.0],\n [1.0, 4.0],\n [1.0, 5.0]\n ])\n instance.fit(X)\n\n # Run\n result = instance.sample(5)\n\n # Check\n assert result.shape == (5, 2)\n results = result[~result.isna()].all()\n assert results.all()\n assert result.loc[:, 0].equals(pd.Series([1.0, 1.0, 1.0, 1.0, 1.0], name=0))\n\n # This is to check that the samples on the non constant column are not constant too.\n assert len(result.loc[:, 1].unique()) > 1\n\n covariance = instance.covariance\n assert (~pd.isna(covariance)).all().all()",
"def test_multiple(self):\n df = self.df.copy()\n out = get_full_column(df.values)\n self.assertTrue(out == 0)",
"def generate_example() -> pd.DataFrame:\n rng = np.random.RandomState(1234)\n\n df = generate_test_dataframe(n_dims=2, size=2000)\n df[\"date\"] = pd.Timestamp(\"2000-01-01\") + pd.to_timedelta(df[\"dim_0\"], unit=\"D\")\n df[\"month\"] = df[\"date\"].dt.month.astype(np.int8)\n df[\"year\"] = df[\"date\"].dt.year.astype(np.int16)\n df[\"city\"] = \"city_\" + df[\"dim_1\"].astype(\"str\")\n df[\"country\"] = \"country_\" + (df[\"dim_1\"] // 500).astype(\"str\")\n df[\"avg_temp\"] = (\n rng.normal(loc=10.0, scale=5.0, size=len(df))\n .round(decimals=1)\n .astype(np.float32)\n )\n df[\"rain\"] = rng.rand(len(df)) > 0.9\n df[\"mood\"] = \"ok\"\n df.loc[(~df[\"rain\"]) & (df[\"avg_temp\"] > 15), \"mood\"] = \"great\"\n df.loc[(df[\"rain\"]) & (df[\"avg_temp\"] < 5), \"mood\"] = \"sad\"\n return df[[\"date\", \"month\", \"year\", \"city\", \"country\", \"avg_temp\", \"rain\", \"mood\"]]",
"def make_xyz(df: pd.DataFrame,\n response_var: list,\n fixed_var: list = ['Sex']) -> np.ndarray:\n x = pd.get_dummies(df[fixed_var]).to_numpy()\n x = np.fliplr(x)\n n_animals = df.shape[0] # to fix z the the same shape\n z = np.identity(n_animals)[1]\n y = df[[response_var]].to_numpy()\n\n return x, z, y",
"def _fract_whole_data(self) :\n if self._fract_data == -1 :\n pass\n else :\n rows = self._df.shape[0]\n fract_rows = int(rows*self._fract_data)\n self._df = self._df.sample(fract_rows).copy()",
"def test_multiple(self):\n df = self.df.copy()\n out = weights_from_array(df.values)\n self.assertTrue(out.size == df.index.size)",
"def create_test_df():\n test_df = pd.DataFrame({'id': [i for i in range(1, 1001)], 'member_id': [\n 10 * i for i in range(1, 1001)]})\n test_df['na_col'] = np.nan\n test_df['id_na'] = test_df.id\n test_df.loc[1:3, 'id_na'] = np.nan\n test_df['constant_col'] = 'constant'\n test_df['constant_col_num'] = 0\n test_df['character_factor'] = [\n choice(list('ABCDEFG')) for _ in range(1000)]\n test_df['num_factor'] = [choice([1, 2, 3, 4]) for _ in range(1000)]\n test_df['nearzerovar_variable'] = 'most_common_value'\n test_df.loc[0, 'nearzerovar_variable'] = 'one_value'\n test_df['binary_variable'] = [choice([0, 1]) for _ in range(1000)]\n test_df['character_variable'] = [str(i) for i in range(1000)]\n test_df['duplicated_column'] = test_df.id\n test_df['many_missing_70'] = [1] * 300 + [np.nan] * 700\n test_df['character_variable_fillna'] = ['A'] * \\\n 300 + ['B'] * 200 + ['C'] * 200 + [np.nan] * 300\n test_df['numeric_variable_fillna'] = [1] * 400 + [3] * 400 + [np.nan] * 200\n test_df['num_variable'] = 100.0\n test_df['int_factor_10'] = [choice(range(10)) for _ in range(1000)]\n test_df['outlier'] = normal(size=1000)\n test_df.loc[[1, 10, 100], 'outlier'] = [999, 3, 999]\n test_df['outlier_na'] = test_df['outlier']\n test_df.loc[[300, 500], 'outlier_na'] = np.nan\n test_df['datetime'] = pd.date_range('1/1/2015', periods=1000, freq='H')\n test_df['None_100'] = [1] * 900 + [None] * 100\n test_df['None_na_200'] = [1] * 800 + [None] * 100 + [np.nan] * 100\n test_df['character_variable_up1'] = ['A'] * 500 + ['B'] * 200 + ['C'] * 300\n test_df['character_variable_up2'] = ['A'] * 500 + ['B'] * 200 + ['D'] * 300\n test_df['other_na'] = ['Missing'] * 100 + ['missing'] * 100 + ['N/a'] * 100 + \\\n ['NA'] * 100 + ['na'] * 100 + ['n/a'] * 100 + ['Not Available'] * 100 + \\\n ['Unknown'] * 100 + ['do_not_touch'] * 200\n return test_df",
"def test_factorize_columns_invalid_input(dataframe):\n with pytest.raises(NotImplementedError):\n dataframe.factorize_columns(1)",
"def helper_create_data(n=500):\n N1 = list(np.random.exponential(3, n))\n N2 = list(np.random.normal(2, 2, n))\n N3 = list(np.random.normal(10, 3, n))\n N4 = list(np.random.exponential(2, n))\n C1 = list(np.random.binomial(1, 0.7, n))\n C2 = list(np.random.poisson(1, n))\n C3 = list(np.random.binomial(5, 0.4, n))\n a = ['cat', 'dog', 'lion']\n C4 = list(np.random.choice(a, n))\n df = pd.DataFrame({\n 'C1': C1,\n 'C2': C2,\n 'C3': C3,\n 'N1': N1,\n 'N2': N2,\n 'N3': N3,\n 'N4': N4,\n 'C4': C4\n })\n rows = list(np.random.randint(0, n, 20))\n cols = list(np.random.randint(0, 7, 5))\n df.iloc[rows, cols] = np.nan\n\n return df",
"def testExampleDataFrameGeneration(ref):\n df = generate_dataframe()\n columns = ref.all_fields_except(['random'])\n ref.assertDataFrameCorrect(df, 'dataframe_result.csv',\n check_data=columns, check_types=columns)",
"def test_single(self):\n df = self.df.head(1).copy()\n n = df.index.size\n arr = df.values\n out = np_cross_ratios(arr)\n self.assertTrue(np.isfinite(out).any())\n self.assertTrue((out[np.isfinite(out)] > 0).all())\n self.assertTrue(out.shape == (n, self.d, self.d))",
"def autogen_dataset_ratios():\n return TabularDataset.autogen('tests/data/dummy_tabular/train.csv',\n seed=42,\n sep=',',\n test_ratio=0.5,\n val_ratio=0.5)",
"def populate_df(df):\n uniques = pd.unique(df.values.ravel('K'))\n zeros = np.zeros(len(uniques))\n\n # main df protection\n df = df.copy(deep=True)\n\n all_dummies = []\n for row in df.itertuples():\n i = 1\n uniques_dic = dict(zip(uniques, zeros))\n while i < 6:\n uniques_dic[row[i]] = 1\n i += 1\n\n all_dummies.append(uniques_dic)\n return pd.DataFrame(all_dummies, columns=uniques)",
"def random_df(request):\n old_state = np.random.get_state()\n\n def fin():\n # tear down: reset the prng after the test to the pre-test state\n np.random.set_state(old_state)\n\n request.addfinalizer(fin)\n np.random.seed(1)\n return pd.DataFrame(\n {'some_count': np.random.randint(1, 8, 20)},\n index=range(0, 20))",
"def test():\n df = df_random()\n print('Random DataFrame')\n print(df.head())\n\n # Test the numerical column generator\n df['delta_v'] = df_numeric_column(-100, 100)\n print('\\nNumerical column generator (added delta_v)')\n print(df.head())\n\n # Test the categorical column generator\n df['color'] = df_categorical_column(['red', 'green', 'blue'])\n print('\\nCategorical column generator (added color)')\n print(df.head())\n\n # Test the categorical column generator with probabilities\n df['color'] = df_categorical_column(['red', 'green', 'blue'], probabilities=[0.6, 0.3, 0.1])\n print('\\nProbabilities should be ~60% red, %30 green and %10 blue')\n print(df['color'].value_counts())\n\n # Also we can just use the built in Numpy method for detailed control\n # over the numeric distribution\n my_series = pd.Series(np.random.normal(0, 1, 1000))\n print('\\nStats on numpy normal (gaussian) distribution')\n print(my_series.describe())",
"def init_individual(index, columns, initializer=None):\n \n ind = pd.DataFrame(0,index=index, columns=columns)\n \n if initializer is not None:\n \n # sets up the DataFrame with the initializer data\n ind.loc[:, 2:] = initializer.loc[:,1:]\n ind.loc[:, 'in'] = initializer.loc[:, 'in']\n \n # sets the age\n for i in index:\n if ind.loc[i,'in'] != 0:\n ind.loc[i,'age'] = 1\n else:\n ind.loc[i,'age'] = 0\n \n # randomly flips a company in or out of the system\n if random.random() < 0.05:\n if ind.loc[i,'in'] == 0:\n ind.loc[i,'in'] = 1\n ind.loc[i,'age'] = 1\n for j in index:\n if i == j:\n ind.loc[i,j] = 0\n else:\n if random.random() < 0.2:\n ind.loc[i,j] = 1\n ind.loc[j,i] = 1\n else:\n ind.loc[i,:] = 0\n ind.loc[:,i] = 0\n \n # randomly flips correlations\n if ind.loc[i,'in'] == 1: \n for j in index:\n if random.random() < 0.05 and i != j:\n ind.loc[i,j] = abs(ind.loc[i,j] - 1)\n ind.loc[j,i] = ind.at[i,j]\n\n else:\n for i in index:\n # randomly places companies in or out of the network\n if random.random() < 0.2:\n ind.loc[i,'in'] = 1\n ind.loc[i,'age'] = 1\n \n # randomly assigns correlations for companies in the network\n if ind.loc[i,'in'] == 1:\n for j in index:\n if i == j:\n ind.loc[i,j] = 0\n else:\n if random.random() < 0.2\n ind.loc[i,j] = 1\n ind.loc[j,i] = ind.at[i,j]\n \n ind.fillna(0)\n\n return ind",
"def test_column_type(self):\n c = Column('foo', range(3), type=int)\n self.assertEqual(list(c), [0, 1, 2])",
"def df_random(num_numeric=3, num_categorical=3, num_rows=100):\n\n # Construct DataFrame\n df = pd.DataFrame()\n column_names = string.ascii_lowercase\n\n # Create numeric columns\n for name in column_names[:num_numeric]:\n df[name] = df_numeric_column(num_rows=num_rows)\n\n # Create categorical columns\n for name in column_names[num_numeric:num_numeric+num_categorical]:\n df[name] = df_categorical_column(['foo', 'bar', 'baz'], num_rows=num_rows)\n\n # Return the dataframe\n return df",
"def test_single(self):\n df = self.df.head(1).copy()\n out = weights_from_array(df.values)\n self.assertTrue(out.size == 1)",
"def fifty_fifty(dataframe) -> pd.DataFrame:\n dataframe[\"allocation\"] = 0.5\n return dataframe",
"def identity_variables_types(data, thres=10):\n data_copy = data.copy()\n res = data.apply(lambda x: x.unique().shape[0], axis=0)\n cat_cols_names = res.index[res <= thres]\n new_cat_names = {name: name + '_cat' for name in cat_cols_names}\n num_cols_names = res.index[res > thres]\n new_num_names = {name: name + '_num' for name in num_cols_names}\n data_copy.rename(columns=new_cat_names, inplace=True)\n data_copy.rename(columns=new_num_names, inplace=True)\n return data_copy",
"def test_scalar_index(setup_teardown_file):\n f = setup_teardown_file[3]\n\n dset = f.create_dataset('x', shape=(), dtype='f')\n out = dset[...]\n assert isinstance(out, np.ndarray)\n assert out.shape == ()",
"def df(x):\n raise NotImplementedError"
] | [
"0.62194395",
"0.60628957",
"0.5972536",
"0.57523376",
"0.5721764",
"0.56640995",
"0.56414294",
"0.5635445",
"0.559424",
"0.5559713",
"0.55304813",
"0.5526375",
"0.5492284",
"0.54894716",
"0.5483786",
"0.5478982",
"0.5412668",
"0.5365263",
"0.5359209",
"0.5358688",
"0.53106207",
"0.5305762",
"0.5278216",
"0.5258843",
"0.5252441",
"0.524804",
"0.5233342",
"0.52328753",
"0.52319795",
"0.52219546"
] | 0.6852467 | 0 |
Logs a message, preserving the progress bar's correct output format. | def log_message(self, message: str) -> None:
from tqdm import tqdm
tqdm.write(message, file=self.tqdm_kwargs.get("file", None)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def log_message(self, message):\n if self.root.is_logging:\n if len(repr(str(msg))) < 1:\n stdout(msg)\n self.root.log_file.write(\"%.4f\" % time.time() + \" \" + repr(str(msg)) + \"\\n\")\n return",
"def log_and_print(self, message):\n self.f.write(message + \"\\n\")\n print message",
"def log(self, message, indent_amount=0):\n indent = \" \" * indent_amount\n text = \"{indent}{text}\\n\".format(indent=indent, text=message)\n sys.stdout.write(text)",
"def log(message):\n\n # Trim the message.\n result = message.strip()\n\n # Print it to STDOUT.\n print result",
"def log(self, message: str):",
"def msg(message):\n to_stdout(\" --- {message}\".format(message=message))\n if _logger:\n _logger.info(message)",
"def log(self, message):",
"def log_message(self, fmt, *args):\n pass",
"def log(self, message):\n self._log += \"%s\\n\" % message\n print message",
"def log(message, level=\"INFO\"):\r\n print(__get_formatted(message, level))",
"def log(self, msg):\n print(msg)",
"def log_message(self, format, *args):",
"def log(message):\n if type(message) is not str:\n message = str(message)\n print(PREFIX + re.sub('\\n', '\\n' + PREFIX, message))",
"def log_message(self, format, *args):\n self._log(format, args)",
"def log(self, message):\n self._logger.write(message)",
"def log_message(self, msg):\n\t\tself.logView.log_message(msg)",
"def log(self, message, log_level=\"info\"):\n for word in self.__ignore_output:\n while word in message:\n message = message.replace(word, \"\")\n getattr(logger, log_level)(\"{}{}\".format(self.__output_prefix, message.strip()))",
"async def log(self, message, level=logging.INFO):\n\t\tif self.log_queue is not None:\n\t\t\tawait self.log_queue.put(LogEntry(level, self.modulename, message))\n\t\telse:\n\t\t\tprint(str(LogEntry(level, self.modulename, message)))",
"def update_log(self, message):\n self.LogOutput_Field.appendPlainText(message)",
"def _log(self, msg):\n self.telegram_queue.put(f\"{__name__.split('.')[-1]}: {msg}\")",
"def log(msg):\n print(str(msg))",
"def log(self, msg=\"\"):\n if len(msg):\n msg = \"[%.03fs] %s\" % (time.time()-self.timeStart, msg)\n print(msg)\n self.logLines.append(msg)",
"def log(self, message):\n timestamp = time.strftime(\"[%H:%M:%S]\", time.localtime(time.time()))\n self.file.write('%s %s\\n' % (timestamp, message))\n self.file.flush()",
"def log(self, msg, *args):\n if args:\n msg %= args\n click.echo(msg, file=sys.stderr)",
"def write(self, message):\r\n now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S - ')\r\n self.terminal.write(message)\r\n self.log.write(message)",
"def log_message(message):\r\n\tif os.getenv(\"WWW2PNG_VERBOSE\") == \"true\":\r\n\t\tprint(threading.current_thread().name, message)\r\n\t\tsys.stdout.flush()",
"async def log(self, message, level=logging.INFO):\n\t\tawait self.handle_log(LogEntry(level, self.name, message))",
"def logprint(self, message):\n print message\n self.log += message+\"\\n\"",
"def log_message(msg):\n click.echo(msg, err=True)",
"def log(message, prefix_newline=False):\n print(('\\n' if prefix_newline else '') + '{0:.2f}'.format(time.time()) + ': ' + str(message))\n sys.stdout.flush()"
] | [
"0.72831756",
"0.7170845",
"0.71509373",
"0.7105534",
"0.7096617",
"0.6908535",
"0.6884731",
"0.6878834",
"0.6869885",
"0.682706",
"0.67774737",
"0.6756506",
"0.67553526",
"0.67249006",
"0.67246664",
"0.6717832",
"0.6699004",
"0.66793716",
"0.66682446",
"0.66388786",
"0.66219264",
"0.66172564",
"0.6579504",
"0.6565332",
"0.6564035",
"0.65462464",
"0.65322405",
"0.6525746",
"0.6520647",
"0.65165347"
] | 0.73927295 | 0 |
Encode the sequence as a list of floats using the provided vocab. | def encode_dna_as_floats(sequence: Iterable[str],
vocab: str = dc_constants.VOCAB,
offset: int = 0) -> Optional[Iterable[float]]:
ids = []
for base in sequence:
if base not in vocab:
return None
base_id = float(vocab.index(base) + offset)
ids.append(base_id)
return ids | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def put_float_list_to_feature(seq_example: tf.train.SequenceExample,\n value: Sequence[Sequence[float]], key: str):\n for s in value:\n seq_example.feature_lists.feature_list.get_or_create(\n key).feature.add().float_list.value[:] = s",
"def encode_sequence(text: List[str], vocab: Dict) -> Sequence[int]:\n return [vocab[ngram] if ngram in vocab else vocab[\"<UNK>\"] for ngram in text]",
"def sequence_to_list_ids(sequence, vocab):\n pass",
"def floats(self) -> List[NumericType]:\n return [float(v) for v in self._record]",
"def toFloatList(values):\n\treturn list(map(lambda va: float(va), values))",
"def _convert_by_vocab(vocab, items):\n output = []\n for item in items:\n output.append(vocab[item])\n return output",
"def _float_feature(value):\n value = _ensure_list(value)\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))",
"def decode(self, seq):\n return [ self.rev_vocab[int(el)] for el in seq ]",
"def dump_vocab(vocab, path, encoding=\"Utf-8\"):\n with open(path, \"w\", encoding=encoding) as fout:\n for word, freq in vocab:\n fout.write(\"%s\\t%d\\n\" % (word, freq))",
"def add_float_feature(val):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[val]))",
"def to_floats(lst):\n vals = []\n for arg in lst:\n vals.append( float(arg) )\n return vals",
"def _bytes_list_feature(values):\n def norm2bytes(value):\n return value.encode() if isinstance(value, str) and six.PY3 else value\n \n return tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[norm2bytes(values)]))",
"def _float_feature(value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))",
"def _float_feature(value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))",
"def _encode(self, tokens: List[str], mean: bool) -> Union[List[np.ndarray], np.ndarray]:\n if self.vectorizer:\n detokenized_sample = self.tokenizer([tokens])[0] # str\n vectorized_sample = self.vectorizer([detokenized_sample]) # (voc_size,)\n\n weights = np.array([vectorized_sample[0, np.where(self.vocabulary == token)[0][0]]\n if len(np.where(self.vocabulary == token)[0]) else 0.\n for token in tokens])\n else:\n weights = np.array([self.get_weight(max(self.counter_vocab.get(token, 0), self.idf_base_count))\n for token in tokens])\n\n if sum(weights) == 0:\n weights = np.ones(len(tokens))\n\n embedded_tokens = np.array(self.embedder([tokens]))[0, :, :]\n\n if mean is None:\n mean = self.mean\n\n if mean:\n embedded_tokens = np.average(embedded_tokens, weights=weights, axis=0)\n else:\n embedded_tokens = np.array([weights[i] * embedded_tokens[i] for i in range(len(tokens))])\n\n return embedded_tokens",
"def encode_and_flatten(\n raw_text_iter: IterableDataset[str],\n tokenizer: Callable[[str], list[str]],\n vocab: Vocab,\n) -> torch.Tensor:\n data = [\n torch.tensor(vocab(tokenizer(item)), dtype=torch.long)\n for item in raw_text_iter\n ]\n return torch.cat(tuple(filter(lambda t: t.numel() > 0, data)))",
"def _bytes_list_feature(values):\n def norm2bytes(value):\n return value.encode() if isinstance(value, str) and six.PY3 else value\n\n return tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[norm2bytes(values)]))",
"def to_context_vec( iterable, context=FloatContext ):\n to_float = context.from_int\n return [to_float(x) for x in iterable]",
"def float_feature(value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))",
"def float_feature(value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))",
"def convert_by_vocab(vocab, items):\n output = []\n for item in items:\n output.append(vocab[item])\n return output",
"def float_feature(value):\n if not isinstance(value, list) and not isinstance(value, np.ndarray):\n value = [value]\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))",
"def convert_by_vocab(vocab, items):\n output = []\n for item in items:\n output.append(vocab[item])\n return output",
"def convert_by_vocab(vocab, items):\n output = []\n for item in items:\n\tif item in vocab:\n\t output.append(vocab[item])\n\telse:\n\t output.append(vocab['[UNK]'])\n return output",
"def encode_ST_messages(messages, vocab_to_int):\n messages_encoded = []\n for message in messages:\n messages_encoded.append([vocab_to_int[word] for word in message.split()])\n\n return np.array(messages_encoded)",
"def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))",
"def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))",
"def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))",
"def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))",
"def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))"
] | [
"0.55322623",
"0.55027866",
"0.54789644",
"0.5260034",
"0.5258647",
"0.5159166",
"0.5092331",
"0.50536036",
"0.5027336",
"0.50256157",
"0.50224316",
"0.50179625",
"0.5005602",
"0.5005602",
"0.49933308",
"0.49916053",
"0.49875823",
"0.49784765",
"0.49743456",
"0.49743456",
"0.4947289",
"0.49337918",
"0.49190533",
"0.48924547",
"0.48610803",
"0.48557937",
"0.48557937",
"0.48557937",
"0.48345253",
"0.48345253"
] | 0.67705715 | 0 |
Returns the sequence with GAP_OR_PAD tokens removed. | def get_sequence_without_gaps_or_padding(sequence: str) -> str:
return sequence.replace(dc_constants.GAP_OR_PAD,
'').replace(dc_constants.GAP_OR_PAD, '') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ungapped(self):\n s = self.sequence\n for sGapChar in GAP_CHARACTERS:\n s = s.replace(sGapChar, '')\n return s",
"def remove_guff(seqs):\n new_seqs = {}\n stop_codons = [\"TGA\", \"TAA\", \"TAG\"]\n for key, value in seqs.items():\n new_seq = \"\"\n for i in range(len(value)-2):\n if value[i:i+3] == \"ATG\":\n break\n\n for j in range(i, len(value)-2, 3):\n if value[j:j+3] in stop_codons:\n new_seqs[key] = value[i:j+3]\n break\n\n return new_seqs",
"def remove_pad(x, pad_remover, mode):\n # Concatenate all tokens (without padding)\n x = flatten_all_but_last(x)\n\n # Remove padding for training and eval\n if mode != ModeKeys.PREDICT:\n # This is a hack to allows inference when the <go> token\n # is detected as padding and removed. This works for now because there is\n # no padding at inference.\n x = pad_remover.remove(x)\n\n x = tf.expand_dims(x, axis=0) # Now batch_size=1\n return x",
"def remove_tokens(self, text):\r\n\r\n return text.replace(self.PAD_TK, \"\").replace(self.UNK_TK, \"\")",
"def cleaning_sequence_regex(sequence):\n amb = re.compile(r\"[^ACGT]\")\n return amb.sub(\"\", sequence)",
"def test_consistent_gap_degen_handling(self):\n # the degen character '?' can be a gap, so when we strip either gaps or\n # degen characters it should be gone too\n raw_seq = \"---??-??TC-GGCG-GCA-G-GC-?-C-TAN-GCGC-CCTC-AGGA?-???-??--\"\n raw_ungapped = re.sub(\"[-?]\", \"\", raw_seq)\n raw_no_ambigs = re.sub(\"[N?]+\", \"\", raw_seq)\n dna = self.DNA(raw_seq)\n self.assertEqual(dna.degap(), raw_ungapped)\n self.assertEqual(dna.strip_degenerate(), raw_no_ambigs)\n self.assertEqual(dna.strip_bad_and_gaps(), raw_ungapped)",
"def test_not_gap(self):\n m, seq = DNA.make_seq(\"ACGGT--A\").parse_out_gaps()\n self.assertTrue(not_gap(m[0]))\n self.assertFalse(not_gap(m[5]))",
"def remove_four_and_every_other(seq):\n # Make a copy of the original sequence, but omit the first four and last four elements\n new_seq = seq[4:-4]\n\n # Make a copy of new sequence and step by 2\n new_seq = new_seq[::2]\n\n return new_seq",
"def squeeze_seq(seq):\r\n\r\n return sub(r'([AGCTacgt])\\1+', '\\\\1', seq)",
"def test_strip_bad_and_gaps(self):\n # have to turn off check to get bad data in; no longer preserves case\n self.assertEqual(\n self.RNA(\"UxxCAGwsnyrHBNz#!D-D\", check=False).strip_bad_and_gaps(),\n \"UCAGWSNYRHBNDD\",\n )\n self.assertEqual(\n self.RNA(\"@#^*($@!#&()!@QZX\", check=False).strip_bad_and_gaps(), \"\"\n )\n self.assertEqual(\n self.RNA(\"aaa ggg ---!ccc\", check=False).strip_bad_and_gaps(), \"AAAGGGCCC\"\n )",
"def remove_4s_every_other_in_between(seq):\n seq_copy = seq [4:-4:2]\n return seq_copy",
"def test_strip_bad_and_gaps(self):\n # have to turn off check to get bad data in; no longer preserves case\n r = self.RNA(\"ACG--GRN?\")\n self.assertEqual(r.strip_bad_and_gaps(), \"ACGGRN\")\n r._data[0] = 99\n self.assertEqual(r.strip_bad_and_gaps(), \"CGGRN\")",
"def remove_every_other(seq):\n length = len(seq)\n new_seq = seq[0:length:2]\n return new_seq",
"def remove_code_punc(code):\n sec = code\n together = set([\"==\", \"&&\", \"<>\", \"||\"])\n spacing = set([\"+\", \"-\", \"*\", \"/\", \"!\", \"^\"])\n exclude = set([\"=\", \"|\", \"&\", \"[\", \"]\", \"\\r\", \"\\n\", \"(\", \")\", \"{\", \"}\", \":\", \",\", \";\", \".\", '\"', \"'\", \">\", \"<\", \"#\", \"%\", \"$\", \"~\", \"\\\\\", \"?\"])\n new_sec = \"\"\n i = 0\n while i < len(sec):\n try:\n if sec[i:i + 1] in together:\n new_sec += \" \" + sec[i:i+1] + \" \"\n i += 2\n continue\n except:\n print \"last\"\n if sec[i] in exclude:\n new_sec += \" \"\n elif sec[i] in spacing:\n new_sec += \" \" + sec[i] + \" \"\n else:\n new_sec += sec[i]\n i += 1\n new_sec = new_sec.replace(\" \", \" \")\n return new_sec",
"def prune_decoded_seqs(seqs):\n out = []\n for s in seqs:\n if constant.EOS in s:\n idx = s.index(constant.EOS_TOKEN)\n out += [s[:idx]]\n else:\n out += [s]\n return out",
"def prepseq(self, seq):\n\n wtf = re.sub(r'\\*$', '', seq)\n return wtf",
"def prune_decoded_seqs(seqs):\r\n out = []\r\n for s in seqs:\r\n if EOS in s:\r\n idx = s.index(EOS)\r\n out += [s[:idx]]\r\n else:\r\n out += [s]\r\n return out",
"def test_excludeinggaps(self):\n model = substitution_model.TimeReversibleNucleotide(model_gaps=False)\n assert len(model.get_alphabet()) == 4",
"def _truncate_seq_pair_RE(tokens_name_1, tokens_name_2, tokens_psg, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_name_1) + len(tokens_name_2) + len(tokens_psg) \n if total_length <= max_length:\n break\n tokens_psg.pop()",
"def strip_proper_pos(text: Union[List[str], str]) -> List[str]:\n\n text = __join_if_list(text)\n try:\n tagged = pos_tag(text.split())\n except LookupError:\n nltk.download('averaged_perceptron_tagger')\n tagged = pos_tag(text.split())\n\n without_propernouns = [word for word, pos in tagged if pos is not 'NPP' and pos is not 'NNPS']\n return without_propernouns",
"def complement_this(seq):\n compliment_dict = {'A':'T', 'T':'A', 'C':'G', 'G':'C'}\n rev_seq = ''\n for nuc in seq:\n if nuc in ['A', 'T', 'G', 'C']:\n rev_seq += compliment_dict[nuc]\n return rev_seq",
"def buildSkipgram(voc, maxlen=50, step=3):\n \n text, sym_indices, _ = voc\n sentences = []\n y = []\n syms = set(text) # unique symbols (chars or words)\n\n # build correct sequences of words in context\n for i in range(maxlen, len(text) - maxlen, step):\n context = text[i-maxlen/2: i+maxlen/2]\n sentences.append(context)\n y.append(1)\n\n # build out of context sequences\n for i in range(maxlen, len(text) - maxlen, step):\n random_idx = np.random.random_integers(1, len(text)-1, maxlen)\n out_of_context = [text[x] for x in random_idx]\n sentences.append(out_of_context)\n y.append(0)\n\n print('nb sequences:', len(sentences))\n \n X = np.zeros((len(sentences), maxlen), dtype=np.int)\n\n for i, sentence in enumerate(sentences):\n for j, sym in enumerate(sentence):\n X[i,j] = sym_indices[sym] \n \n y = np.asarray(y)\n\n # shuffle and return\n idx = np.random.permutation(X.shape[0])\n X = X[idx,:]\n y = y[idx]\n\n return (X,y)",
"def test_gap_vector(self):\n\n def g(x):\n return self.RNA(x).gap_vector()\n\n self.assertEqual(g(\"\"), [])\n self.assertEqual(g(\"ACUGUCAGUACGHCSDKCCUCCDNCNS\"), [False] * 27)\n self.assertEqual(\n g(\"GUACGUAACAKADC-SDAHADSAK\"),\n list(map(bool, list(map(int, \"000000000000001000000000\")))),\n )\n self.assertEqual(g(\"-DSHSUHDSS\"), list(map(bool, list(map(int, \"1000000000\")))))\n self.assertEqual(\n g(\"UACHASCAGDS-\"), list(map(bool, list(map(int, \"000000000001\"))))\n )\n self.assertEqual(\n g(\"---CGAUgCAU---ACGHc---ACGUCAGU--?\"),\n list(map(bool, list(map(int, \"111000000001110000011100000000111\")))),\n )",
"def kludge_gvars(mangled):\n # Loop in reverse looking for '+-', but don't run off the end\n for idx in range(len(mangled) - 1)[::-1]:\n if mangled[idx + 1] == '+-':\n reunited = ' '.join(mangled[idx:idx + 3])\n # Throw away the used elements...\n for _ in range(3):\n mangled.pop(idx)\n # Repair the list with reunited gvar string\n mangled.insert(idx, reunited)\n return mangled",
"def get_n_minus_1_grams(n_grams: str) -> str:\n return n_grams.rsplit(' ')[0]",
"def _UnPad(self, padded):\n pad = bytearray(padded)[-1]\n return padded[:-pad]",
"def remove_template_terminal_gaps(candidate,template):\n if len(template) != len(candidate):\n raise ValueError, \\\n \"Sequences must be aligned, but their \"+\\\n \"lengths aren't equal. %d != %d\" % (len(candidate),len(template))\n \n if len(template) == 0:\n return candidate, template\n \n degapped_candidate_len = len(candidate.degap())\n \n candidate = DNA.makeSequence(candidate)\n template = DNA.makeSequence(template)\n \n template_gap_vector = template.gapVector()\n first_non_gap = template_gap_vector.index(False)\n num_three_prime_gaps = template_gap_vector[::-1].index(False)\n last_non_gap = len(template_gap_vector) - num_three_prime_gaps\n \n # Construct the candidate name, which will include the range of bases\n # from the original sequence\n candidate = candidate[first_non_gap:last_non_gap]\n template = template[first_non_gap:last_non_gap]\n candidate_start_pos = first_non_gap + 1\n candidate_end_pos = degapped_candidate_len - num_three_prime_gaps\n candidate_name = candidate.Name\n if candidate_name.endswith('RC'):\n name_delimiter = ':'\n else:\n name_delimiter = ' '\n candidate_name = '%s%s%d..%d' %\\\n (candidate_name,name_delimiter,candidate_start_pos,candidate_end_pos)\n \n return DNA.makeSequence(candidate,Name=candidate_name), template",
"def reconstruct_ngram(self, ngram):\n\n punc_b = ['!', '?', '.', ',', ';', ':', '\\'', ')', ']', '}']\n punc_a = ['(', '[', '}', '$']\n ngram = ' '.join(ngram)\n for p in punc_b:\n ngram = ngram.replace(' '+p, p)\n for p in punc_a:\n ngram = ngram.replace(p+' ', p)\n ngram = re.sub('(^| )BEGQ', ' \"', ngram)\n ngram = re.sub('ENDQ($| )', '\" ', ngram)\n ngram = ngram.replace('DOUBLEDASH', '--')\n return ngram",
"def findGaps(sequence):\n patt = re.compile(r\"[Nn]+\")\n for match in patt.finditer(sequence):\n yield (match.start(), match.end())",
"def remove_padding(self, data):\n pad_len = ord(data[-1])\n return data[:-pad_len]"
] | [
"0.6750863",
"0.5907528",
"0.5521466",
"0.5289905",
"0.5231796",
"0.51557064",
"0.50116265",
"0.4972692",
"0.49560896",
"0.49461672",
"0.4935586",
"0.48857594",
"0.48742893",
"0.4835272",
"0.4817296",
"0.48110196",
"0.4809287",
"0.4806622",
"0.47794852",
"0.47744012",
"0.47555757",
"0.47546285",
"0.47453615",
"0.4697329",
"0.46932018",
"0.46786076",
"0.46671236",
"0.4652005",
"0.46514508",
"0.46499428"
] | 0.68402666 | 0 |
Returns start and end coordinates of the label in the reference genome. Querying the reference genome for these coordinates will produce the label sequence. We need to adjust either start or end by 1, depending on the orientation of the reference. | def get_label_start_end(
label_base_positions: Iterable[int],
strand: bed_pb2.BedRecord.Strand) -> Tuple[Optional[int], Optional[int]]:
# Gap and padding tokens may have a position of -1, since they are not
# actually present in the reference. Remove all instances of -1, since we do
# not want to consider it when computing min/max position.
valid_label_base_positions = set(label_base_positions)
valid_label_base_positions.discard(-1)
if not valid_label_base_positions:
return None, None
start = min(valid_label_base_positions)
end = max(valid_label_base_positions)
if strand == bed_pb2.BedRecord.Strand.FORWARD_STRAND:
end += 1
elif strand == bed_pb2.BedRecord.Strand.REVERSE_STRAND:
start -= 1
else:
raise ValueError('Strand must be set.')
return start, end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def labelpos(self):\n return self._labelpos",
"def get_ref_and_start_and_offset(forward_ref_sequence: str,\n strand: bed_pb2.BedRecord.Strand,\n chrom_start: int,\n chrom_end: int) -> Tuple[str, int, int]:\n ref_sequence = forward_ref_sequence\n if strand == bed_pb2.BedRecord.Strand.FORWARD_STRAND:\n start = chrom_start\n offset = 1\n elif strand == bed_pb2.BedRecord.Strand.REVERSE_STRAND:\n start = chrom_end\n offset = -1\n # For the reverse strand, we want the reverse complement.\n ref_sequence = reverse_complement(forward_ref_sequence)\n else:\n raise ValueError('Strand must be set.')\n return ref_sequence, start, offset",
"def getStartAndEndCoordinates(alignedSegment):\n return alignedSegment.reference_start, getFirstNonClippedPositionInRead(alignedSegment, readSeq), \\\n alignedSegment.reference_end-1, getLastNonClippedPositionInRead(alignedSegment, readSeq)",
"def get_label_position(self) -> LabelPositionStr:\n return LABEL_POSITION.inverse[self.labelPosition()]",
"def get_feature_start_end(feature_record):\n return (feature_record.location.start.position+1, feature_record.location.end.position)",
"def getPos(self,len,end,nodes):\n start=end\n if self.count==nodes:\n last=len\n else:\n last=end+(int)(len/(nodes+1))\n self.count+=1\n return (start,last)",
"def get_anchor_label(idx1, idx2, true_entity, sen_len):\n if idx1 >=0 and idx2 < sen_len:\n if idx1 in true_entity:\n for candidate in true_entity[idx1]:\n if candidate[0] == idx2:\n return 1, candidate[1] # true entity pair\n return 0, 0 # neg pair\n return -1, -1 # labels out of boundary",
"def coordinates(self, name, start=None, end=None):\n record = self.process(name)\n if not start and not end:\n start = 1\n end = record.end - record.start + 1\n positions = {}\n match_positions = []\n\n if record.strand == '+':\n _start = 1\n for relative, actual in enumerate(range(record.start - 1, record.end),\n start=_start):\n positions[relative] = actual\n for pos in range(start, end + 1):\n match_positions.append(positions[pos])\n return [(record.scaffold, min(match_positions), max(match_positions) + 1,\n record.mirbase_name, 0, record.strand)]\n\n elif record.strand == '-':\n _start = 1\n for relative, actual in enumerate(reversed(range(record.start - 1,\n record.end)), start=_start):\n positions[relative] = actual\n for pos in range(start, end + 1):\n match_positions.append(positions[pos])\n return [(record.scaffold, min(match_positions), max(match_positions) + 1,\n record.mirbase_name, 0, record.strand)]",
"def _get_start(self, variant, reference_start, cigar, ignore_softclip=False):\n indels = get_indel_from_cigar(cigar, ignore_softclip)\n start = variant.POS - reference_start - 1\n # for pos, val in indels.iteritems(): # python2\n for pos, val in indels.items():\n if pos > start:\n break\n if val[0] == 'I':\n start += val[1]\n elif val[0] == 'D':\n start -= val[1]\n return start",
"def _get_label_offset(dataframe, offset=0.01):\n\n x_offset = (dataframe.iloc[:, 0].max() - dataframe.iloc[:, 0].min()) * offset\n y_offset = (dataframe.iloc[:, 1].max() - dataframe.iloc[:, 1].min()) * offset\n\n return x_offset, y_offset",
"def get_labelPositions(y_list, x_list):\n n_labels = len(y_list)\n\n # GET BORDER POINTS\n x_min, x_max = get_min_max(x_list)\n x_mid = (x_max - x_min) / 2\n\n y_min, y_max = get_min_max(y_list)\n y_mid = (y_max - y_min) / 2\n # Border points\n bp1 = np.array(list(product([x_min, x_max, x_mid], \n [y_min, y_max, y_mid])))[:-1]\n\n # Top right points\n # bp2 = np.array(list(product([0., 1.0, 0.75], \n # [0., 1.0, 0.75])))[:-1]\n\n # Bottom right points\n # bp3 = np.array(list(product([0., 1.0, 0.25], \n # [0., 1.0, 0.25])))[:-1] \n #border_points = np.vstack([bp1, bp2, bp3])\n border_points = np.vstack([bp1])\n n_border = border_points.shape[0]\n\n # Initialize placeholders\n ref_points = np.zeros((n_border + n_labels, 2))\n\n label_positions = np.zeros((n_labels, 2))\n label_indices = np.zeros(n_labels, int)\n\n \n \n ref_points[:n_border] = border_points\n\n for i in range(n_labels):\n # GET POSITIONS\n n_points = x_list[i].size\n xy_points = np.zeros((n_points, 2))\n\n xy_points[:, 0] = x_list[i]\n xy_points[:, 1] = y_list[i]\n \n # GET REF POINTS\n dist = get_pairwise_distances(xy_points, ref_points[:n_border + i])\n\n # GET MINIMUM DISTANCES\n min_dist = dist.min(axis=1)\n\n # GET MAXIMUM MINIMUM DISTANCE\n label_index = np.argmax(min_dist)\n label_pos = xy_points[label_index]\n\n ref_points[n_border + i] = label_pos\n label_positions[i] = label_pos\n label_indices[i] = label_index\n\n return label_positions, label_indices",
"def find_rpt_coords(self) -> (int, int):\n start_size = self.size\n end_size = self.size + len(self.allele)\n coord = self.coord\n fasta_alt = self.fasta_alt\n while self.allele == fasta_alt:\n coord += len(self.allele)\n start_size += len(self.allele)\n end_size += len(self.allele)\n fasta_alt = self.seq[start_size:end_size]\n new_start = coord - len(self.allele)\n new_end = new_start + len(self.allele) - 1\n return new_start, new_end",
"def get_DNApos_fromcoords(self,x,y):\n\n # Are we close to the DNA sequence?\n if abs(y-self.seq_row)>10:\n return None\n\n # ok, DNA it is\n pos=int(float(x-self.seq_xstart+4.0)/self.base_scale.get())\n return pos",
"def get_region_seq(record,start,stop):\n segment = record[start:stop]\n segment_seq = segment.seq\n return segment_seq # pure sequence string",
"def coordinates(self, name, start=None, end=None):\n if \"|\" in name:\n self.name = name.split(\"|\")[0]\n else:\n self.name = name\n positions = {}\n match_positions = []\n records = []\n segments = []\n result_segments = []\n for record in self.process(self.name):\n records.append(record)\n records.sort(key=lambda x: int(x.exon_number))\n\n if records[0].strand == '+':\n _start = 1\n for record in records:\n for relative, actual in enumerate(range(record.start, record.end + 1),\n start=_start):\n positions[relative] = actual\n _start = relative + 1\n for pos in range(start, end):\n match_positions.append(positions[pos])\n for key, group in groupby(enumerate(match_positions),\n lambda x: x[0] - x[-1]):\n segment = list(map(itemgetter(1), group))\n segments.append([segment[0], segment[-1]])\n for segment in segments:\n for record in records:\n if segment[0] >= record.start and segment[1] <= record.end:\n result_segments.append((record.scaffold, segment[0], segment[1],\n record.transcript_id + '|' + record.gene_name, 0, record.strand))\n\n elif records[0].strand == '-':\n _start = 1\n for record in records:\n for relative, actual in enumerate(reversed(range(record.start,\n record.end + 1)), start=_start):\n positions[relative] = actual\n _start = relative + 1\n for pos in range(start, end):\n match_positions.append(positions[pos])\n for key, group in groupby(enumerate(reversed(match_positions)),\n lambda x: x[0] - x[-1]):\n segment = list(map(itemgetter(1), group))\n segments.append([segment[0], segment[-1]])\n for segment in segments:\n for record in records:\n if segment[0] >= record.start and segment[1] <= record.end:\n result_segments.append((record.scaffold, segment[0], segment[1],\n record.transcript_id + '|' + record.gene_name, 0, record.strand))\n\n if len(result_segments) == 0:\n logger.debug('%s, %s, %s' % (name, start, end))\n logger.debug('%s' % str(segments))\n for r in records:\n logger.debug('%s %s %s %s' % (r.scaffold, r.strand,\n r.start, r.end))\n\n return result_segments",
"def labels(self):\n return self.label(self.p_y_given_x)",
"def span(self):\r\n return self._start, self._end",
"def _get_anno_id(self, start):\n if self.orientation > 0: # positive strand\n return '%s:%d' % (self.id, start % 3)\n else: # negative strand\n return '%s:-%d' % (self.id, (-start) % 3)",
"def __coding_coordinate(self):\n region1 = self.long_side_len\n region2 = self.short_side_len\n length = len(self.seq)\n if self.direction == '+':\n a_s = 0\n a_e = region2\n b_s = self.length - region1\n b_e = self.length - 1\n elif self.direction == '-':\n a_s = 0\n a_e = region1\n b_s = self.length - region2\n b_e = self.length - 1\n return (a_s, a_e, b_s, b_e)",
"def getLabel(self):\n return self.content[:12]",
"def getLabels(self):\n return self.numToLabel",
"def get_seq_start_end(target_index, seq_length, sample_every=1):\n half_offset = int((seq_length - 1) / 2) * sample_every\n end_index = target_index + half_offset\n start_index = end_index - (seq_length - 1) * sample_every\n return start_index, end_index",
"def get_label_locs(prop_df):\n locs = [0 for _ in range(5)]\n \n for i in range(4):\n locs[i+1] = locs[i] + prop_df['proportion'].iloc[i]\n \n for i in range(len(locs)):\n locs[i] += prop_df['proportion'].iloc[i]/2.6\n\n return locs",
"def xref(self):\n self.appendString(\"\\n\")\n startxref = self.filesize()\n maximumIndexValue = 0\n for i in self.indirectObjects.keys():\n if i > maximumIndexValue:\n maximumIndexValue = i\n self.appendString(\"xref\\n0 %d\\n\" % (maximumIndexValue+1))\n if self.IsWindows():\n eol = '\\n'\n else:\n eol = ' \\n'\n for i in range(0, maximumIndexValue+1):\n if i in self.indirectObjects:\n self.appendString(\"%010d %05d n%s\" % (self.indirectObjects[i], 0, eol))\n else:\n self.appendString(\"0000000000 65535 f%s\" % eol)\n return (startxref, (maximumIndexValue+1))",
"def _printSequences(self, start, end):\n stubs = ['' for _ in range(self.nChildren())]\n label = dist = ''\n for i in range(self.nChildren()):\n stubs[i] = self._printSequences(self.children[i], start, end)\n if self.dist or self.dist == 0.0:\n dist = ':' + str(self.dist)\n if self.label != None:\n label = str(self.label)\n if self.nChildren() == 0:\n return label + dist\n else:\n stubstr = '('\n for i in range(len(stubs) - 1):\n stubstr += stubs[i] + ','\n return stubstr + stubs[-1] + ')' + label + dist",
"def startAndEnd(self):\n upperRow = 0\n upperCol = 0\n lowerRow = 0\n lowerCol = 0\n if self.selectionMode == kSelectionNone:\n upperRow = self.penRow\n upperCol = self.penCol\n lowerRow = self.penRow\n lowerCol = self.penCol\n elif self.selectionMode == kSelectionAll:\n upperRow = 0\n upperCol = 0\n lowerRow = self.parser.rowCount() - 1\n lowerCol = self.parser.rowWidth(-1)\n elif self.selectionMode == kSelectionBlock:\n upperRow = min(self.markerRow, self.penRow)\n upperCol = min(self.markerCol, self.penCol)\n lowerRow = max(self.markerRow, self.penRow)\n lowerCol = max(self.markerCol, self.penCol)\n elif (self.selectionMode == kSelectionCharacter or\n self.selectionMode == kSelectionLine or\n self.selectionMode == kSelectionWord):\n upperRow = self.markerRow\n upperCol = self.markerCol\n lowerRow = self.penRow\n lowerCol = self.penCol\n if upperRow == lowerRow and upperCol > lowerCol:\n upperCol, lowerCol = lowerCol, upperCol\n elif upperRow > lowerRow:\n upperRow, lowerRow = lowerRow, upperRow\n upperCol, lowerCol = lowerCol, upperCol\n #app.log.detail('start and end', upperRow, upperCol, lowerRow, lowerCol)\n return (upperRow, upperCol, lowerRow, lowerCol)",
"def exonString(genes, startref, endref):\n\tstring = (endref-startref+1)*'N'\n\tfor gene in genes:\n\t\tfor exon in genes[gene].coords:\n\t\t\tstart = exon[0] - startref\n\t\t\tend = exon[1] - startref\n\t\t\texonlength = end - start + 1\n\t\t\texonstring = (exonlength)*'F'\n\t\t\tstring = replaceString(string, exonstring, start)\n\t\t\t\n\treturn string",
"def __get_exon_coordinates(self, exon):\n start = None\n end = None\n if self.__is_padding_enabled():\n start = exon[constants.EXON_PADDED_START]\n end = exon[constants.EXON_PADDED_END]\n else:\n start = exon[constants.EXON_START]\n end = exon[constants.EXON_END]\n return (start, end)",
"def _calculate_label_offset(port):\n offset_position = np.array((-cos(pi/180 * port.orientation),\n -sin(pi/180 * port.orientation)))\n offset_position *= port.width * 0.05\n return offset_position",
"def getReferenceWindow(self, refId, tplStrand, start, end):\n\n refArray = self.refDict[refId].getNumpyWrapper()\n\n # adjust position for reference padding\n start += self.pad\n end += self.pad\n\n # Forward strand\n if tplStrand == 0:\n slc = refArray[start:end]\n slc = np.right_shift(slc, 4)\n return \"\".join(seqMap[x] for x in slc)\n\n # Reverse strand\n else:\n slc = refArray[end:start:-1]\n slc = np.right_shift(slc, 4)\n return \"\".join(seqMapComplement[x] for x in slc)"
] | [
"0.6364321",
"0.6140304",
"0.61087877",
"0.60330933",
"0.60321313",
"0.575407",
"0.5750131",
"0.57237166",
"0.55922985",
"0.55865574",
"0.55369616",
"0.5511564",
"0.5482404",
"0.5407526",
"0.53941387",
"0.53722996",
"0.5353169",
"0.5347849",
"0.53471684",
"0.53188986",
"0.52850187",
"0.5282459",
"0.5278244",
"0.5267832",
"0.5267071",
"0.525514",
"0.52513784",
"0.5243904",
"0.5237795",
"0.52306575"
] | 0.6887572 | 0 |
Gets subreads/encoded field from example as a string. | def get_encoded_subreads_from_example(example):
return example.features.feature['subreads/encoded'].bytes_list.value[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_field(self, bib_entry, field):\n output = bib_entry.fields[field] if field in bib_entry.fields else \"\"\n return self.strip_braces(output)",
"def __str__(self):\n outstr = self._field1\n return outstr",
"def get_encoded_label_from_example(example):\n return example.features.feature['label/encoded'].bytes_list.value[0]",
"def getFieldStringValue (self, fieldname):\n v = self.getFieldValue ( fieldname )\n return self._table[fieldname].val_py2txt ( v )",
"def get_encoded_deepconsensus_input_from_example(example):\n return example.features.feature[\n 'deepconsensus_input/encoded'].bytes_list.value[0]",
"def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title}: \" + s\n\n return s",
"def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title}: \" + s\n\n return s",
"def get_str(self, name):\n return str(self.field(name).toString())",
"def _nested_lookup(doc, field):\n value = doc\n keys = field.split(\".\")\n try:\n for k in keys:\n value = value[k]\n except KeyError:\n return None\n\n return str(value)",
"def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title} :\" + s\n\n return s",
"def getDataField(self, name): \n if name not in self.__examples: \n raise ValueError(\"Field does not exist: \" + name)\n \n return self.__examples[name]",
"def _get(self, path):\n return ''.join(\n '%s: %s\\n' % (field_name, value)\n for field_name, value in six.iteritems(self.metadata)\n ).encode('utf-8')",
"def get_sequence_string(seq):\n if type(seq) == Bio.SeqRecord:\n seqstr = seq.seq.tostring()\n elif type(seq) == Bio.Seq.Seq:\n seqstr = seq.tostring()\n else:\n seqstr = seq\n return seqstr",
"def field_to_s(self, fieldname, tag = False):\n fieldname = self.__class__.FIELD_ALIAS.get(fieldname, fieldname)\n v = self._data.get(fieldname, None)\n if v is None:\n raise gfapy.NotFoundError(\"Field {} not found\".format(fieldname))\n t = self._field_or_default_datatype(fieldname, v)\n if not isinstance(v, str):\n v = gfapy.Field._to_gfa_field(v, datatype = t, fieldname = fieldname,\n line = self)\n if self.vlevel >= 2:\n gfapy.Field._validate_gfa_field(v, t, fieldname)\n if tag:\n return gfapy.Field._to_gfa_tag(v, fieldname, datatype = t, line = self)\n else:\n return v",
"def getString(self):\n return \"\".join(self.data)",
"def Value(self) -> str:",
"def field(self) -> IMockPin:\n return self[\"field\"]",
"def get_record(self, index, asbase64=False):\n data = self.bank.readrec(int(index))\n if asbase64:\n return base64.b64encode(data[1:]).decode('utf-8')\n else:\n return data[1:]",
"def __repr__(self):\n\n name = self.__class__.__name__\n\n return '%s(\\'%s\\')' % (name, self.raw_field)",
"def subfield():\n return Subfield()",
"def r(self):\n return self.field[0]",
"def get_string(self, **kwargs):\n ...",
"def string(self):\n return self._my_string",
"def getSubDataField(self, name, indices): \n if (indices >= self.__numExamples).any() or (indices < 0).any(): \n raise ValueError(\"Invalid example indices\")\n \n if name not in self.__examples: \n raise ValueError(\"Field does not exist: \" + name)\n\n\n return self.__examples[name][ix_(indices)]",
"def read_string(self):\n return self.bits.read('bytes:{0}'.format(self.read_int())).decode(\"utf-8\", 'replace')",
"def __str__(self):\n\n return self.raw_field",
"def format_field(self, value, spec):\n cache = Cache()\n if spec == \"co\":\n # if cache(re.match(\"(.*)co$\", spec)):\n value = co_join(value)\n spec = \"s\"\n # cache.output.group(1) + \"s\"\n elif cache(re.match(r\"^sub(\\d?)_?(.*)$\", spec)):\n depth = (1 if cache.output.group(1) == \"\" else\n int(cache.output.group(1)))\n value = \"\\n\".join([\n \"{0}{1} = {2}\".format(depth * \" \", key, val)\n for key, val in value.items()])\n if cache.output.group(2) != \"\":\n value = (\n depth * \"[\" + cache.output.group(2) + depth * \"]\" + \"\\n\" +\n value)\n spec = \"s\"\n return super(Format, self).format_field(value, spec)",
"def custom_field_serializer(self, arg: SeField[Any]) -> str:\n assert arg.serializer\n return f\"{arg.serializer.name}({arg.varname})\"",
"def base_field(self):\n return self.field[1]",
"def format_sub_part(cls, field, length):\n try:\n if not length:\n raise ValueError\n\n length = int(length)\n return \"`%s`(%d)\" % (field, length)\n\n except ValueError:\n return \"`%s`\" % (field,)"
] | [
"0.5705618",
"0.5460794",
"0.5459715",
"0.539829",
"0.5357855",
"0.5356727",
"0.5356727",
"0.5356539",
"0.53236294",
"0.5237667",
"0.5237599",
"0.5225943",
"0.5169422",
"0.51572955",
"0.5146009",
"0.5126784",
"0.51213324",
"0.51131326",
"0.5083494",
"0.5076718",
"0.5053751",
"0.50475234",
"0.5036504",
"0.5032725",
"0.50315624",
"0.5023475",
"0.50139236",
"0.49975592",
"0.49965534",
"0.49848816"
] | 0.70664877 | 0 |
Gets the subreads/shape field from example as a list of int64. | def get_subreads_shape_from_example(example):
assert len(example.features.feature['subreads/shape'].int64_list.value) == 3
return example.features.feature['subreads/shape'].int64_list.value[:] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_encoded_subreads_from_example(example):\n return example.features.feature['subreads/encoded'].bytes_list.value[0]",
"def read_ints(self):\n return self.numbers",
"def as_list(self) -> List[int]:\n return self.my_partition",
"def get_list_of_int2(self):\n pass",
"def getIntArray2D(self) -> typing.List[typing.List[int]]:\n ...",
"def read_ints(self, dtype='i4'):\n return self.read_record(dtype)",
"def getLongArray2D(self) -> typing.List[typing.List[int]]:\n ...",
"def _decode_as_serialized_example_list(self, serialized):\n feature_spec = {\n \"serialized_context\": tf.io.FixedLenFeature([1], tf.string),\n \"serialized_examples\": tf.io.VarLenFeature(tf.string),\n }\n features = tf.compat.v1.io.parse_example(serialized, feature_spec)\n # Generate sizes from `serialized_examples`.\n lists = features[\"serialized_examples\"]\n mask = tf.scatter_nd(\n indices=lists.indices,\n updates=tf.ones_like(lists.values, dtype=tf.int32),\n shape=lists.dense_shape)\n sizes = tf.reduce_sum(input_tensor=mask, axis=1)\n return features[\"serialized_context\"], tf.sparse.to_dense(\n lists, default_value=\"\"), sizes",
"def getByteArray2D(self) -> typing.List[typing.List[int]]:\n ...",
"def getIntRow(self, int: int) -> typing.List[int]:\n ...",
"def fields(self):\n assert self.is_block()\n assert self.tag () != OCamlValue.DOUBLE_ARRAY_TAG # FIXME not implemented\n\n words = self.size_words()\n if words is None:\n return [None]\n\n a = []\n for i in range(int(words)):\n field = self._unsafe_field(i)\n a.append(field)\n if field is None:\n break # Append a single invalid value to indicate out-of-bounds to the user\n return a",
"def convert_to_array(self): \n self.reads = np.asarray(self.reads, dtype=\"int64\")\n self.sampling=True",
"def get_label_shape_from_example(example):\n assert len(example.features.feature['label/shape'].int64_list.value) == 1\n return example.features.feature['label/shape'].int64_list.value[:]",
"def get_num_passes_from_example(example):\n assert len(\n example.features.feature['subreads/num_passes'].int64_list.value) == 1\n return example.features.feature['subreads/num_passes'].int64_list.value[0]",
"def getLongRow(self, int: int) -> typing.List[int]:\n ...",
"def getRefReads(self):# -> int\n return self.refReads",
"def convert_to_list(self): \n self.reads = list(self.reads)\n self.sampling = False",
"def get_coords(self, field):\n return_list = []\n\n coords = self.coords()\n for i, coord in enumerate(self[field].dims):\n if coord in coords:\n return_list.append(self[coord])\n else:\n return_list.append(np.arange(len(self[field].shape[i])))\n\n return return_list",
"def read_ints(self, size=4, count=1, location=None):\n return_vals = []\n if self._tiff is not None:\n off = self._offset\n if location is not None:\n off = location\n for c in range(count):\n return_vals.append(int.from_bytes(self._tiff[off:off+size], byteorder=self._byteorder))\n off += size\n if location is None:\n self._offset += (count * size)\n return return_vals",
"def get_ids_as_slice_or_list(self):\n return slice(self._lo_atom, self._lo_atom + self._n_atoms)",
"def _int64_array_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))",
"def data(self) -> List[int]:\n return self.__ids",
"def data(self) -> List[int]:\n return self.__ids",
"def data(self) -> List[int]:\n return self.__ids",
"async def infer_shape_list_getitem(track, seq, idx):\n seq_sh = await seq['shape']\n return seq_sh.shape",
"def np_ints(self):\n # A numpy-array packaging of the integer input parameters\n return np.array([self.mode, self.n_freqs], dtype=np.int32)",
"def get_data(self) -> List[List[int]]:\n sudoku_array = []\n for row in self.entries:\n sudoku_array.append([0 if entry.text == '' else int(entry.text) for entry in row])\n return sudoku_array",
"def getByteRow(self, int: int) -> typing.List[int]:\n ...",
"def get_oids(features, return_type=\"list\"):\n array = da.FeatureClassToNumPyArray(features, ('OID@'))\n if return_type == \"list\":\n return array['OID@'].tolist()\n return array['OID@']",
"def get_sub_values(self):\n return list()"
] | [
"0.6184817",
"0.5882475",
"0.5881324",
"0.57822245",
"0.57525074",
"0.56289333",
"0.5594792",
"0.5497891",
"0.54603356",
"0.5455274",
"0.5443167",
"0.54085",
"0.53583235",
"0.5348249",
"0.53445643",
"0.53237766",
"0.53161526",
"0.53096575",
"0.5279014",
"0.52352875",
"0.52090454",
"0.5205888",
"0.5205888",
"0.5205888",
"0.51842",
"0.517839",
"0.5157062",
"0.51455456",
"0.5141053",
"0.51400465"
] | 0.7593962 | 0 |
Gets the subreads/num_passes field from example as an int64. | def get_num_passes_from_example(example):
assert len(
example.features.feature['subreads/num_passes'].int64_list.value) == 1
return example.features.feature['subreads/num_passes'].int64_list.value[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getRefReads(self):# -> int\n return self.refReads",
"def list_of_runnums (ins, exp) :\n try : expruns = experiment_info.experiment_runs(ins, exp)\n #if exp == 'xcs83814' : return []\n except : return []\n\n return [int(rec['num']) for rec in expruns]\n #runs = experiment_info.experiment_runs(ins, exp)\n #lst = []\n #for rec in runs :\n # lst.append( int(rec['num']) )\n #return lst",
"def get_subreads_shape_from_example(example):\n assert len(example.features.feature['subreads/shape'].int64_list.value) == 3\n return example.features.feature['subreads/shape'].int64_list.value[:]",
"def read_ints(self):\n return self.numbers",
"def num(self) -> List[int]:\n return self._num",
"def gatherReadCounts(samplesList, scriptsDir, threads, alignmentPath, outRoot, stype, mode):\n reads = 0\n ext = \".pruned.bam\"\n if mode == \"all_reads\":\n ext = \".bam\"\n for i in range(len(samplesList)):\n bam = os.path.join(alignmentPath, outRoot) + \".\" + stype + \".\" + str(i) + ext\n reads += int(subprocess.run([os.path.join(scriptsDir, \"get_readcount.sh\"), bam, str(threads)], capture_output=True, text=True).stdout.strip(\"\\n\"))\n return reads",
"def get_encoded_subreads_from_example(example):\n return example.features.feature['subreads/encoded'].bytes_list.value[0]",
"def getAltReads(self):# -> int\n return self.altReads",
"def readlen_cnts(store, filter_srrs=None, keep_srrs=None):\n df = store['prealn/workflow/fastq'].copy()\n df.reset_index(inplace=True)\n df = remove_rows(df, 'srr', filter_srrs)\n df = keep_rows(df, 'srr', keep_srrs)\n df['len'] = df[['avgLen_R1', 'avgLen_R2']].max(axis=1)\n\n return df.len.min(), df.len.median(), df.len.mode().iloc[0], df.len.max()",
"def getOtherReads(self):# -> int\n return self.otherReads",
"def sum_reads(self, sample):\n total_reads = 0.0\n arts = lims.get_artifacts(samplelimsid = sample.id, process_type = self.process_types)\n for art in arts:\n if art.qc_flag == 'PASSED' and '# Reads' in art.udf:\n total_reads += float(art.udf.get('# Reads'))\n return total_reads/1000000",
"def samples(self) -> int:\n return self._samples",
"def _get_item_lengths(self) -> List[int]:\n return [len(x[0]) for x in self.data]",
"def _get_run_length_ac(self):\n self._run_length_ac = []\n for block in self.data:\n self._run_length_ac.extend(\n encode_run_length(tuple(iter_zig_zag(block))[1:])\n )",
"def get_total_rows(max_passes: int) -> int:\n # For each of `max_subreads`, we have three pieces of information: bases, PW,\n # and IP. We also have four rows for SN, and one for strand.\n # The information is structured as follows:\n # Bases: (0, params.max_passes - 1) represent bases.\n # PW: rows params.max_passes to (params.max_passes * 2 - 1)\n # IP: rows (params.max_passes * 2) to (params.max_passes * 3 - 1)\n # Strand: rows (params.max_passes * 3) to (params.max_passes * 4)\n # CCS+SN: rows (params.max_passes * 4 + 1) to (params.max_passes * 4 + 5)\n # The last five rows are CCS sequence (1), and SN (4).\n return (max_passes * 4) + 5",
"def scan(self) -> list[int]:",
"def get_read_len(wildcards):\n read_len = caseinfo.loc[(wildcards.sample, wildcards.unit), \"read_len\"]\n return int(read_len)",
"def as_list(self) -> List[int]:\n return self.my_partition",
"def getCounts(self):\n ret = [0]*len(self.numToLabel)\n for block in self.blocks:\n for label in block[1]: ret[label] += 1\n return ret",
"def scan(self) -> List[int]:",
"def scan(self) -> List[int]:",
"def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):\n split_inside_flags = torch.split(inside_flags, num_level_anchors)\n num_level_anchors_inside = [\n int(flags.sum()) for flags in split_inside_flags\n ]\n return num_level_anchors_inside",
"def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):\n\n split_inside_flags = torch.split(inside_flags, num_level_anchors)\n num_level_anchors_inside = [\n int(flags.sum()) for flags in split_inside_flags\n ]\n return num_level_anchors_inside",
"def GetNumSamples(trrecord, samplelists=[]):\n if len(samplelists) == 0: samplelists.append(None)\n return [sum(trrecord.GetGenotypeCounts(samplelist=sl).values()) for sl in samplelists]",
"def __call__(self, example):\n para_counter = data.count_tokens(example['context_tokens'] if not self._iterate_over_example\n else [c for tkn in example['context_tokens'] for c in tkn])\n ques_counter = data.count_tokens(example['ques_tokens'] if not self._iterate_over_example\n else [c for tkn in example['ques_tokens'] for c in tkn])\n counter = para_counter + ques_counter\n return list(counter.items())",
"def info(self) -> list[int]:",
"def get_list_of_int2(self):\n pass",
"def count(self):\n return len(self.read_ints())",
"def num_samples(self):\n return self._ll_tree_sequence.get_num_samples()",
"def getSegments(self) -> List[int]:\n ..."
] | [
"0.5838554",
"0.57468516",
"0.57462114",
"0.55311155",
"0.5446684",
"0.5408164",
"0.5309831",
"0.5301768",
"0.527564",
"0.52182186",
"0.51577634",
"0.5125342",
"0.5124437",
"0.51199293",
"0.51196676",
"0.5108761",
"0.506709",
"0.5051076",
"0.5049883",
"0.5034971",
"0.5034971",
"0.5027258",
"0.5000393",
"0.4964827",
"0.4958005",
"0.49326065",
"0.4914701",
"0.48961666",
"0.48877567",
"0.48547685"
] | 0.78519976 | 0 |
Gets label/encoded field from example as a string. | def get_encoded_label_from_example(example):
return example.features.feature['label/encoded'].bytes_list.value[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def label_from_example(example):\n val = example.features.feature['label'].int64_list.value\n if val:\n return int(val[0])\n else:\n return None",
"def get_label(self, which_label: str, extra_label: str) -> str:\n result = self.row_dict.get(extra_label)\n if result:\n # We will use this label\n pass\n elif which_label == 'first_label':\n header = self.row_header\n first_label = next((i for i in header if i.startswith('label')),\n None)\n if first_label is None:\n raise LabelNotFoundError()\n result = self.row_dict[first_label]\n elif which_label in self.row_dict:\n result = self.row_dict[which_label]\n else:\n raise LabelNotFoundError()\n str_result = str(result)\n return str_result",
"def label(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label\")",
"def label(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label\")",
"def label(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label\")",
"def label(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label\")",
"def label(self) -> str:\n return self[\"label\"]",
"def getFieldStringValue (self, fieldname):\n v = self.getFieldValue ( fieldname )\n return self._table[fieldname].val_py2txt ( v )",
"def _get_label(self):\n return self.label",
"def GetLabel(self):\n \n return self.label_str",
"def getLabel(self):\n result = self.content[:12]\n if result == \"\":\n if self.tags:\n result = str(self.tags.first)\n return result",
"def label(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"label\")",
"def label(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"label\")",
"def label(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"label\")",
"def __repr__(self):\n return \"<label: %s, input: %s>\" % (self.label,\n super(LabeledExample, self).__repr__())",
"def label(self):\n # type: () -> str\n labels = self.__class__.__labels__\n return force_str(labels.get(self.value, self.name))",
"def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")",
"def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")",
"def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")",
"def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")",
"def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")",
"def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")",
"def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")",
"def __str__(self):\n return str(self.label)",
"def get_label(cls) -> str:\n return cls._meta.label_lower.split('.')[-1]",
"def __str__(self) -> str:\n return f'label: {self.label}, freq: {self.freq}, code: {self.code}'",
"def get_label(self):\n return self.label",
"def get_label(self):\n return self.label",
"def get_label(self):\n return self.label",
"def get_label(self):\n return self.label"
] | [
"0.6567466",
"0.6203385",
"0.6174128",
"0.6174128",
"0.6174128",
"0.6174128",
"0.60609",
"0.60340345",
"0.5977775",
"0.59713846",
"0.5863954",
"0.58526266",
"0.58526266",
"0.58526266",
"0.57978487",
"0.5788342",
"0.57807505",
"0.57807505",
"0.57807505",
"0.57807505",
"0.57807505",
"0.57807505",
"0.57807505",
"0.5737766",
"0.57362294",
"0.5728891",
"0.57228255",
"0.57228255",
"0.57228255",
"0.57228255"
] | 0.7369651 | 0 |
Gets the label/shape field from example as a list of int64. | def get_label_shape_from_example(example):
assert len(example.features.feature['label/shape'].int64_list.value) == 1
return example.features.feature['label/shape'].int64_list.value[:] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def label_from_example(example):\n val = example.features.feature['label'].int64_list.value\n if val:\n return int(val[0])\n else:\n return None",
"def provide_label(self):\n return [(k, v.shape) for k, v in self.label]",
"def provide_label(self):\r\n return [(k, tuple([1] + list(v.shape[1:]))) for k, v in self.label]",
"def provide_label(self):\n return [(k, tuple([self.batch_size] + list(v.shape[1:]))) for k, v in self._label]",
"def loadlabels_aslist(filename=None):\n if filename is None:\n filename = os.path.join(os.getenv('HOME'), 'ddc', 'data', 'bpti_labels_ms.txt')\n with open(filename) as src:\n lines = src.read().strip().split('\\n')\n label = [int(l.split()[1]) for l in lines]\n # Account for the last one:\n label.append(label[-1])\n return label",
"def _int64_array_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))",
"def getLabels(self):\n return self.numToLabel",
"def _img_array_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value.ravel()))",
"def _img_array_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value.ravel()))",
"def get_input_labels(data_source):\n data = []\n with open(data_source, 'rb') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in reader:\n data.append(int(row[0]))\n return data",
"def _int64_feature(value):\n if type(value) != list and type(value) != np.ndarray:\n value = [value]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))",
"def int64_feature(value):\n if not isinstance(value, list) and not isinstance(value, np.ndarray):\n value = [value]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))",
"def get_label(path): # get ED ES label\n label_csv = pd.read_csv(path)\n label_list = []\n trans_list = list(np.array(label_csv).astype(np.int32))\n for i in trans_list:\n temp = []\n for j in i:\n if j >= 0:\n temp.append(j)\n label_list.append(temp)\n return label_list",
"def _int64_feature(value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))",
"def _int64_feature(value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))",
"def __getitem__(self, idx):\n record = self.records[idx]\n return np.array(record['feat']), np.array(record['label'], dtype=np.int64)",
"def encode(cls, instance, typedef=None):\n return list(instance.shape)",
"def _int64_feature(value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))",
"def _int64_feature(value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))",
"def _int64_feature(value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))",
"def _int64_feature(value):\n values = value if isinstance(value, (list, tuple)) else [value]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=values))",
"def _int64_feature(value):\n values = value if isinstance(value, (list, tuple)) else [value]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=values))",
"def _int64_feature(value):\n return tf.train.Feature(\n int64_list=tf.train.Int64List(value=[int(v) for v in value]))",
"def get_labels(self) -> np.ndarray:\n return self._dataset.get_labels()[self._ids]",
"def _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(\n value=[int(v) for v in value]))",
"def int64_feature(value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))",
"def _int64_feature(value):\n value = _ensure_list(value)\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))",
"def _decode_as_serialized_example_list(self, serialized):\n feature_spec = {\n \"serialized_context\": tf.io.FixedLenFeature([1], tf.string),\n \"serialized_examples\": tf.io.VarLenFeature(tf.string),\n }\n features = tf.compat.v1.io.parse_example(serialized, feature_spec)\n # Generate sizes from `serialized_examples`.\n lists = features[\"serialized_examples\"]\n mask = tf.scatter_nd(\n indices=lists.indices,\n updates=tf.ones_like(lists.values, dtype=tf.int32),\n shape=lists.dense_shape)\n sizes = tf.reduce_sum(input_tensor=mask, axis=1)\n return features[\"serialized_context\"], tf.sparse.to_dense(\n lists, default_value=\"\"), sizes",
"def int64_feature(value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))",
"def int64_feature(value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))"
] | [
"0.679036",
"0.6569586",
"0.62951785",
"0.6134868",
"0.6108419",
"0.59921026",
"0.59756845",
"0.5952071",
"0.5952071",
"0.59401375",
"0.58788294",
"0.5849895",
"0.5831298",
"0.5816761",
"0.5816761",
"0.5807753",
"0.5801871",
"0.5795513",
"0.5795513",
"0.5795513",
"0.5794914",
"0.5794914",
"0.57851183",
"0.57735515",
"0.5772636",
"0.5759727",
"0.5748792",
"0.57446235",
"0.5734419",
"0.5734419"
] | 0.74587005 | 0 |
Gets deepconsensus_input/encoded field from example as a string. | def get_encoded_deepconsensus_input_from_example(example):
return example.features.feature[
'deepconsensus_input/encoded'].bytes_list.value[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_encoded_label_from_example(example):\n return example.features.feature['label/encoded'].bytes_list.value[0]",
"def get_encoded_subreads_from_example(example):\n return example.features.feature['subreads/encoded'].bytes_list.value[0]",
"def deepconsensus_input_to_example(\n deepconsensus_input: deepconsensus_pb2.DeepConsensusInput,\n example_height: int,\n inference: bool,\n counters: Optional[Dict[str, metrics.Metrics.counter]] = None,\n) -> Optional[tf.train.Example]:\n if not deepconsensus_input.subreads:\n if counters and counters['examples_no_subreads_counter']:\n counters['examples_no_subreads_counter'].inc()\n return\n\n # Get the example_width from the first subreads.\n example_width = len(deepconsensus_input.subreads[0].bases)\n\n # The full example will include 4 rows for the signal to noise ratio (sn)\n # values. The remaining rows will contain three sets of per-base values:\n # the base, pulse width (pw), and interpulse distance (ip). Some models\n # may use only a subset of this information downstream.\n per_base_rows = get_per_base_rows(example_height)\n if per_base_rows < 0 or per_base_rows % 4 != 0:\n raise ValueError('example_height - 5 must be non-negative, and divisible '\n 'by four.')\n max_passes = get_max_passes(example_height)\n\n if len(deepconsensus_input.subreads) > max_passes:\n # Increment a counter if the number of subreads from the\n # deepconsensus_input is more than the `max_passes` derived from the\n # input `example_height`.\n # But still continue.\n if counters and counters['examples_with_discarded_subreads']:\n counters['examples_with_discarded_subreads'].inc()\n\n example = tf.train.Example()\n features = example.features\n data = np.zeros(\n shape=(example_height, example_width, 1), dtype=dc_constants.NP_DATA_TYPE)\n data += dc_constants.GAP_OR_PAD_INT\n\n # Number of subreads is capped at num_subreads. In the cases of fewer\n # subreads, rows are left empty.\n kept_subreads = 0\n # Add extra dimension so that shape is (example_width, 1).\n base_indices, pw_indices, ip_indices, strand_indices, ccs_indices, sn_indices = get_indices(\n max_passes)\n for i in range(min(len(deepconsensus_input.subreads), max_passes)):\n subread = deepconsensus_input.subreads[i]\n # Each tuple should already be padded to the appropriate length.\n assert len(subread.bases) == example_width\n\n encoded_bases = encode_dna_as_floats(subread.bases) # pytype: disable=wrong-arg-types\n assert encoded_bases is not None\n data[base_indices[0] + i] += np.expand_dims(np.array(encoded_bases), -1)\n data[pw_indices[0] + i] += np.expand_dims(np.array(subread.pw), -1)\n data[ip_indices[0] + i] += np.expand_dims(np.array(subread.ip), -1)\n data[strand_indices[0] + i] += np.expand_dims(\n np.expand_dims(np.array(subread.subread_strand), -1), -1)\n kept_subreads += 1\n\n if kept_subreads == 0:\n if counters and counters['examples_no_subreads_counter']:\n counters['examples_no_subreads_counter'].inc()\n return\n\n if deepconsensus_input.ccs_sequence:\n encoded_ccs_bases = encode_dna_as_floats(deepconsensus_input.ccs_sequence) # pytype: disable=wrong-arg-types\n data[slice(*ccs_indices)] += np.expand_dims(np.array(encoded_ccs_bases), -1)\n\n data[slice(*sn_indices)] += np.expand_dims(\n np.expand_dims(np.array(deepconsensus_input.sn), -1), -1)\n\n features.feature['subreads/encoded'].bytes_list.value.append(data.tostring())\n features.feature['subreads/shape'].int64_list.value.extend(data.shape)\n features.feature['subreads/num_passes'].int64_list.value.append(kept_subreads)\n\n if not inference:\n label_bases_list = encode_dna_as_floats(deepconsensus_input.label.bases) # pytype: disable=wrong-arg-types\n assert label_bases_list is not None\n # Final shape of label should be (example_width, ).\n label_matrix = 
np.array(label_bases_list).astype(dc_constants.NP_DATA_TYPE)\n features.feature['label/encoded'].bytes_list.value.append(\n label_matrix.tostring())\n features.feature['label/shape'].int64_list.value.extend(label_matrix.shape)\n features.feature['deepconsensus_input/encoded'].bytes_list.value.append(\n deepconsensus_input.SerializeToString())\n return example",
"def get_input_data(input_section: Dict) -> str:\n default_value = input_section.get(\"value\")\n if isinstance(default_value, str):\n return default_value\n\n if default_value:\n complex_field = default_value.get(\"complex\")\n if complex_field:\n if complex_field.get(\"accessor\"):\n return f\"{complex_field.get('root')}.{complex_field.get('accessor')}\"\n else:\n return f\"{complex_field.get('root')}\"\n return default_value.get(\"simple\")\n\n return \"\"",
"def get_train_inputs(self, example):\n return example",
"def value_from_example(example, feature_name):\n feature = example.features.feature[feature_name]\n feature_type = feature.WhichOneof('kind')\n return getattr(feature, feature_type).value[:]",
"def label_from_example(example):\n val = example.features.feature['label'].int64_list.value\n if val:\n return int(val[0])\n else:\n return None",
"def variant_from_example(example):\n features = example.features.feature\n var_string = features['variant/encoded'].bytes_list.value[0]\n return variants_pb2.Variant.FromString(var_string)",
"def example_to_data(self, example):\n raise NotImplementedError",
"def record_encoding(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"record_encoding\")",
"def record_encoding(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"record_encoding\")",
"def _get_encoding_form(self, input):\n if self.inference_procedure == 'direct':\n return input\n else:\n raise NotImplementedError",
"def get_str_address(address):\n return \\\n get_ob_value_primitive(address, 'AddrLine1', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'AddrLine2', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'AddrLine3', exception_return_value='') + ', ' + \\\n get_ob_value_primitive(address, 'City', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'County', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'StateProvince', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'ZipPostalCode', exception_return_value='')",
"def get_encoded(self):\n pass",
"def getInput(self):\n return self.__string",
"def input(self):\n return ''.join([state[1] for state in self.condensed_input_states])",
"def _create_serialized_example(predecessor, current, successor, vocab):\n example = tf.train.Example(features=tf.train.Features(feature={\n \"decode_pre\": _int64_feature(_sentence_to_ids(predecessor, vocab)),\n \"encode\": _int64_feature(_sentence_to_ids(current, vocab)),\n \"decode_post\": _int64_feature(_sentence_to_ids(successor, vocab)),\n }))\n\n return example.SerializeToString()",
"def decode_json_example(json_examples, name=None):\n return gen_parsing_ops.decode_json_example(json_examples, name=name)",
"def process_example(example_string: tf.train.Example,\n schema: tfgnn.GraphSchema):\n spec = tfgnn.create_graph_spec_from_schema_pb(schema)\n graph = tfgnn.parse_single_example(spec, example_string)\n\n # Note: the output tags cannot be structured; they must be single string\n # objects.\n for key, tensor in iter_stats_graph(graph):\n if isinstance(tensor, tf.RaggedTensor):\n tensor = tensor.flat_values\n for value in tensor.numpy().flat:\n yield beam.pvalue.TaggedOutput(key, value)",
"def get_encoded_bytes_string(self, target):\n pass",
"def serialize_example(image_inp_string,image_out_string):\n image_inp_shape = tf.image.decode_jpeg(image_inp_string).shape\n image_out_shape = tf.image.decode_jpeg(image_out_string).shape\n feature = {\n\n 'image_input': _bytes_feature(image_inp_string),\n 'image_output':_bytes_feature(image_out_string),\n }\n\n example_proto = tf.train.Example(features=tf.train.Features(feature=feature))\n return example_proto.SerializeToString()\n\n\n #--------------------------------------------------------------------------------------\n\n ###process image",
"def encode(self, *stuff):\n if self._kv_fmt:\n result = self._encode_wire(stuff[0])\n else:\n result = self._encode_wire(stuff)\n return result.getvalue()",
"def inputString(self):\n return self.__inputString",
"def bytestring_to_record(example):\n rec = tf.train.SequenceExample.FromString(example)\n start_time = rec.context.feature[START_TIME].float_list.value[0]\n vid_id = rec.context.feature[VIDEO_ID].bytes_list.value[0].decode('utf-8')\n labels = list(rec.context.feature[LABELS].int64_list.value)\n data = rec.feature_lists.feature_list[AUDIO_EMBEDDING_FEATURE_NAME]\n features = [b.bytes_list.value for b in data.feature]\n features = np.asarray([np.frombuffer(_[0], dtype=np.uint8)\n for _ in features])\n if features.ndim == 1:\n raise ValueError(\"Caught unexpected feature shape: {}\"\n .format(features.shape))\n\n rows = [{VIDEO_ID: vid_id, LABELS: labels, TIME: np.uint16(start_time + t)}\n for t in range(len(features))]\n\n return features, pd.DataFrame.from_records(data=rows)",
"def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n return example",
"def queryValue(self):\n return utf8decoder(self.payload)[0]",
"def encode(input):\n return ModelEncoder().encode(input)",
"def _decode_record(record,name_to_features):\n example = tf.parse_single_example(record,name_to_features)\n\n return example",
"def get_example(example_id=None):\n # This is all local, requires no external GPT3 calls\n # Return all examples\n if not example_id:\n return json.dumps(gpt.get_all_examples())\n\n example = gpt.get_example(example_id)\n if not example:\n return error(\"id not found\", HTTPStatus.NOT_FOUND)\n return json.dumps(example.as_dict())",
"def get_encoded(self):\n return self.key"
] | [
"0.6439003",
"0.58448774",
"0.53348607",
"0.5202095",
"0.5010533",
"0.49677646",
"0.49160782",
"0.4901157",
"0.48838043",
"0.4850435",
"0.4850435",
"0.4814992",
"0.48128593",
"0.47833014",
"0.47751632",
"0.46998075",
"0.46935028",
"0.4684098",
"0.46840334",
"0.4667676",
"0.4667126",
"0.46581003",
"0.46407884",
"0.46349925",
"0.46234208",
"0.45577094",
"0.45513305",
"0.45432776",
"0.4542643",
"0.4499143"
] | 0.81469035 | 0 |
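A minimal round-trip sketch for the accessor above, assuming the deepconsensus_pb2 proto module is importable; the stored bytes are a serialized DeepConsensusInput proto (see the following row), so they parse back directly.

# Hypothetical usage; `example` is a tf.train.Example whose
# 'deepconsensus_input/encoded' feature has been populated.
encoded = get_encoded_deepconsensus_input_from_example(example)
dc_input = deepconsensus_pb2.DeepConsensusInput.FromString(encoded)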
Returns tf.Example created from the given DeepConsensusInput proto. | def deepconsensus_input_to_example(
deepconsensus_input: deepconsensus_pb2.DeepConsensusInput,
example_height: int,
inference: bool,
counters: Optional[Dict[str, metrics.Metrics.counter]] = None,
) -> Optional[tf.train.Example]:
if not deepconsensus_input.subreads:
if counters and counters['examples_no_subreads_counter']:
counters['examples_no_subreads_counter'].inc()
return
# Get the example_width from the first subreads.
example_width = len(deepconsensus_input.subreads[0].bases)
# The full example will include 4 rows for the signal to noise ratio (sn)
# values. The remaining rows will contain three sets of per-base values:
# the base, pulse width (pw), and interpulse distance (ip). Some models
# may use only a subset of this information downstream.
per_base_rows = get_per_base_rows(example_height)
if per_base_rows < 0 or per_base_rows % 4 != 0:
raise ValueError('example_height - 5 must be non-negative, and divisible '
'by four.')
max_passes = get_max_passes(example_height)
if len(deepconsensus_input.subreads) > max_passes:
# Increment a counter if the number of subreads from the
# deepconsensus_input is more than the `max_passes` derived from the
# input `example_height`.
# But still continue.
if counters and counters['examples_with_discarded_subreads']:
counters['examples_with_discarded_subreads'].inc()
example = tf.train.Example()
features = example.features
data = np.zeros(
shape=(example_height, example_width, 1), dtype=dc_constants.NP_DATA_TYPE)
data += dc_constants.GAP_OR_PAD_INT
# Number of subreads is capped at num_subreads. In the cases of fewer
# subreads, rows are left empty.
kept_subreads = 0
# Add extra dimension so that shape is (example_width, 1).
base_indices, pw_indices, ip_indices, strand_indices, ccs_indices, sn_indices = get_indices(
max_passes)
for i in range(min(len(deepconsensus_input.subreads), max_passes)):
subread = deepconsensus_input.subreads[i]
# Each tuple should already be padded to the appropriate length.
assert len(subread.bases) == example_width
encoded_bases = encode_dna_as_floats(subread.bases) # pytype: disable=wrong-arg-types
assert encoded_bases is not None
data[base_indices[0] + i] += np.expand_dims(np.array(encoded_bases), -1)
data[pw_indices[0] + i] += np.expand_dims(np.array(subread.pw), -1)
data[ip_indices[0] + i] += np.expand_dims(np.array(subread.ip), -1)
data[strand_indices[0] + i] += np.expand_dims(
np.expand_dims(np.array(subread.subread_strand), -1), -1)
kept_subreads += 1
if kept_subreads == 0:
if counters and counters['examples_no_subreads_counter']:
counters['examples_no_subreads_counter'].inc()
return
if deepconsensus_input.ccs_sequence:
encoded_ccs_bases = encode_dna_as_floats(deepconsensus_input.ccs_sequence) # pytype: disable=wrong-arg-types
data[slice(*ccs_indices)] += np.expand_dims(np.array(encoded_ccs_bases), -1)
data[slice(*sn_indices)] += np.expand_dims(
np.expand_dims(np.array(deepconsensus_input.sn), -1), -1)
features.feature['subreads/encoded'].bytes_list.value.append(data.tostring())
features.feature['subreads/shape'].int64_list.value.extend(data.shape)
features.feature['subreads/num_passes'].int64_list.value.append(kept_subreads)
if not inference:
label_bases_list = encode_dna_as_floats(deepconsensus_input.label.bases) # pytype: disable=wrong-arg-types
assert label_bases_list is not None
# Final shape of label should be (example_width, ).
label_matrix = np.array(label_bases_list).astype(dc_constants.NP_DATA_TYPE)
features.feature['label/encoded'].bytes_list.value.append(
label_matrix.tostring())
features.feature['label/shape'].int64_list.value.extend(label_matrix.shape)
features.feature['deepconsensus_input/encoded'].bytes_list.value.append(
deepconsensus_input.SerializeToString())
return example | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_encoded_deepconsensus_input_from_example(example):\n return example.features.feature[\n 'deepconsensus_input/encoded'].bytes_list.value[0]",
"def _create_example(self):\n source = np.random.randn(self.batch_size, self.max_decode_length,\n self.input_depth)\n source_len = np.random.randint(0, self.max_decode_length, [self.batch_size])\n target_len = np.random.randint(0, self.max_decode_length * 2,\n [self.batch_size])\n target = np.random.randn(self.batch_size,\n np.max(target_len), self.input_depth)\n labels = np.random.randint(0, self.vocab_size,\n [self.batch_size, np.max(target_len) - 1])\n\n example_ = namedtuple(\n \"Example\", [\"source\", \"source_len\", \"target\", \"target_len\", \"labels\"])\n return example_(source, source_len, target, target_len, labels)",
"def _deserialize_example(example_proto, labeled=True):\n if labeled:\n feature_description = {\n 'image': tf.io.FixedLenFeature([], tf.string),\n 'image_name': tf.io.FixedLenFeature([], tf.string),\n 'patient_id': tf.io.FixedLenFeature([], tf.int64),\n 'sex': tf.io.FixedLenFeature([], tf.int64),\n 'age_approx': tf.io.FixedLenFeature([], tf.int64),\n 'anatom_site_general_challenge': tf.io.FixedLenFeature([], tf.int64),\n 'diagnosis': tf.io.FixedLenFeature([], tf.int64),\n 'target': tf.io.FixedLenFeature([], tf.int64)\n }\n else:\n feature_description = {\n 'image': tf.io.FixedLenFeature([], tf.string),\n 'image_name': tf.io.FixedLenFeature([], tf.string)\n }\n\n return tf.io.parse_single_example(example_proto, feature_description)",
"def _make_example_proto(inputs, labels):\n ex = tf.train.SequenceExample()\n fl_inputs = ex.feature_lists.feature_list['inputs']\n fl_labels = ex.feature_lists.feature_list['labels']\n for input_at_t, label_at_t in zip(inputs, labels):\n fl = fl_inputs.feature.add()\n for item in input_at_t:\n fl.float_list.value.append(item)\n fl_labels.feature.add().int64_list.value.append(label_at_t)\n return ex",
"def _make_example(X, y, n, target_type='int'):\n\n feature = {}\n feature['X'] = tf.train.Feature(\n float_list=tf.train.FloatList(value=X.flatten()))\n feature['n'] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=n.flatten()))\n\n if target_type == 'int':\n feature['y'] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=y.flatten()))\n elif target_type in ['float', 'signal']:\n y = y.astype(np.float32)\n feature['y'] = tf.train.Feature(\n float_list=tf.train.FloatList(value=y.flatten()))\n else:\n raise ValueError('Invalid target type.')\n\n # Construct the Example proto object\n example = tf.train.Example(features=tf.train.Features(feature=feature))\n return example",
"def _create_serialized_example(predecessor, current, successor, vocab):\n example = tf.train.Example(features=tf.train.Features(feature={\n \"decode_pre\": _int64_feature(_sentence_to_ids(predecessor, vocab)),\n \"encode\": _int64_feature(_sentence_to_ids(current, vocab)),\n \"decode_post\": _int64_feature(_sentence_to_ids(successor, vocab)),\n }))\n\n return example.SerializeToString()",
"def _convert_single_example(example, max_seq_length, tokenizer):\n tokens = [\"[CLS]\"]\n tokens.extend(example.words)\n tokens.append(\"[SEP]\")\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n label_ids = [_PADDING_LABEL_ID]\n label_ids.extend(example.label_ids)\n label_ids.append(_PADDING_LABEL_ID)\n\n segment_ids = [0] * len(input_ids)\n input_mask = [1] * len(input_ids)\n\n # Pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n label_ids.append(_PADDING_LABEL_ID)\n\n def create_int_feature(values):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(input_ids)\n features[\"input_mask\"] = create_int_feature(input_mask)\n features[\"segment_ids\"] = create_int_feature(segment_ids)\n features[\"label_ids\"] = create_int_feature(label_ids)\n features[\"sentence_id\"] = create_int_feature([example.sentence_id])\n features[\"sub_sentence_id\"] = create_int_feature([example.sub_sentence_id])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n return tf_example",
"def create_example_train(row, vocab):\n context, utterance, label = row\n context_transformed = transform_sentence(context, vocab)\n utterance_transformed = transform_sentence(utterance, vocab)\n context_len = len(next(vocab._tokenizer([context])))\n utterance_len = len(next(vocab._tokenizer([utterance])))\n label = int(float(label))\n\n # New Example\n example = tf.train.Example()\n example.features.feature[\"context\"].int64_list.value.extend(context_transformed)\n example.features.feature[\"utterance\"].int64_list.value.extend(utterance_transformed)\n example.features.feature[\"context_len\"].int64_list.value.extend([context_len])\n example.features.feature[\"utterance_len\"].int64_list.value.extend([utterance_len])\n example.features.feature[\"label\"].int64_list.value.extend([label])\n return example",
"def parser(record):\n\n record_spec = {\n \"inputs\": tf.VarLenFeature(tf.int64),\n \"type_id\": tf.FixedLenFeature([1], tf.int64),\n }\n\n # retrieve serialized example\n example = tf.parse_single_example(\n serialized=record,\n features=record_spec)\n\n inputs = example[\"inputs\"]\n inp_len = tf.shape(inputs)[0]\n\n # expand type id to full length\n example[\"type_id\"] = tf.broadcast_to(example[\"type_id\"], [inp_len])\n\n # convert all sparse example to dense\n example = sparse_to_dense(example)\n\n return example",
"def state_as_example(state):\n base = state['base']\n tokens_per_statement = state['tokens_per_statement']\n target_output_length = state['target_output_length']\n mod = state['mod']\n output_mod = state['output_mod']\n python_source_lines = []\n for instruction in state['instructions']:\n ast_node = instruction.node\n python_source_line = astunparse.unparse(ast_node, version_info=(3, 5))\n python_source_line = python_source_line.strip()\n python_source_lines.append(python_source_line)\n python_source = '\\n'.join(python_source_lines)\n return control_flow_programs_features.generate_example_from_python_object(\n executor=state['executor'],\n base=base,\n python_object=python_source,\n tokens_per_statement=tokens_per_statement,\n target_output_length=target_output_length,\n mod=mod,\n output_mod=output_mod)",
"def decode_example(\n serialized_proto: str,\n use_example_weight: bool = True) -> Tuple[Dict[str, tf.Tensor], tf.Tensor]:\n name_to_features = dict(\n context=tf.io.VarLenFeature(tf.int64),\n label=tf.io.FixedLenFeature([1], tf.int64))\n examples = tf.io.parse_single_example(serialized_proto, name_to_features)\n features = collections.OrderedDict()\n for name in examples:\n feature_content = examples[name]\n if feature_content.dtype == tf.int64:\n tf.cast(feature_content, tf.int32)\n if isinstance(feature_content, tf.SparseTensor):\n feature_content = tf.sparse.to_dense(feature_content)\n features[name] = feature_content\n\n if use_example_weight:\n # The returned example is in the format of ({'context': a list of movie IDs,\n # 'label': next movie ID}, example weight). Using 1.0 as the weight here.\n output = (features, tf.constant(1.0))\n else:\n # If using global similarity and global recall, return (features,\n # features['label']) instead.\n output = (features, features[\"label\"])\n return output",
"def __parser__(self, example_proto):\n # configure feature and label length\n # It is crucial that for tf.string, the length is not specified, as the data is stored as a single string!\n x_config = tf.FixedLenFeature([], tf.string) \\\n if self.x_dtype == tf.string else tf.FixedLenFeature([self.num_features], self.x_dtype)\n if self.num_labels == 0:\n proto_config = {'x': x_config}\n else:\n y_config = tf.FixedLenFeature([], tf.string) \\\n if self.y_dtype == tf.string else tf.FixedLenFeature([self.num_labels], self.y_dtype)\n proto_config = {'x': x_config, 'y': y_config}\n\n # decode examples\n datum = tf.parse_single_example(example_proto, features=proto_config)\n if self.x_dtype == tf.string: # if input is string / bytes, decode it to float32\n if self.decode_jpeg:\n # first decode compressed image string to uint8, as data is stored in this way\n # datum['x'] = tf.image.decode_image(datum['x'], channels=3)\n datum['x'] = tf.image.decode_jpeg(datum['x'], channels=3)\n else:\n # first decode data to uint8, as data is stored in this way\n datum['x'] = tf.decode_raw(datum['x'], tf.uint8)\n # then cast data to tf.float32 or tf.float16\n datum['x'] = tf.cast(datum['x'], tf.float32)\n # cannot use string_to_number as there is only one string for a whole sample\n # datum['x'] = tf.strings.to_number(datum['x'], tf.float32) # this results in possibly a large number\n\n # return data\n if 'y' in datum:\n # y can be present in many ways:\n # 1. a single integer, which requires y to be int32 or int64 (e.g, used in tf.gather in cbn)\n # 2. num-class bool/integer/float variables. This form is more flexible as it allows multiple classes and\n # prior probabilities as targets\n # 3. float variables in regression problem.\n # but...\n # y is stored as int (for case 1), string (for other int cases), or float (for float cases)\n # in the case of tf.string and tf.int64, convert to to int32\n if self.y_dtype == tf.string:\n # avoid using string labels like 'cat', 'dog', use integers instead\n datum['y'] = tf.decode_raw(datum['y'], tf.uint8)\n datum['y'] = tf.cast(datum['y'], tf.int32)\n else:\n datum['y'] = tf.cast(datum['y'], self.y_dtype)\n if self.use_one_hot_label:\n datum['y'] = tf.reshape(tf.one_hot(datum['y'], self.num_classes), (-1, ))\n if self.use_smooth_label: # label smoothing\n datum['y'] = 0.9 * datum['y'] + 0.1 / self.num_classes\n return datum['x'], datum['y']\n else:\n return datum['x']",
"def input_fn():\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=sample_length)\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: tf.parse_single_example(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n return d",
"def convert_to_tf_example(\n patient_data: Tuple[str, Dict[str, object]]\n) -> tf.train.Example:\n try:\n data = patient_data[1]\n patient = data[\"patient\"][0]\n studies = data[\"studies\"][0]\n \n features = convert_patient_to_feature(patient)\n for study_id, study in studies:\n study_data = convert_study_to_feature(study)\n for feature in study_data:\n features.update(feature)\n return tf.train.Example(features=tf.train.Features(feature=features),)\n except Exception as e:\n _logger.error(\n f\"Error occurred when creating a TFRecord. patient_data: {data.get('patient', data)}. Error: {e}.\"\n )\n return tf.train.Example(features=tf.train.Features(feature={}),)",
"def parser(record):\n record_spec = {\n \"input\": tf.FixedLenFeature([seq_len], tf.int64),\n \"labels\": tf.FixedLenFeature([tgt_len], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_len],tf.float32),\n \"target_mask\": tf.FixedLenFeature([tgt_len],tf.float32)\n }\n\n # retrieve serialized example\n example = tf.parse_single_example(\n serialized=record,\n features=record_spec)\n\n _convert_example(example, use_bfloat16)\n\n for k, v in example.items():\n tf.logging.info(\"%s: %s\", k, v)\n\n return example",
"def build(input_reader_config, batch_size=None, transform_input_data_fn=None, multi_gpu=True):\n if not isinstance(input_reader_config, input_reader_pb2.InputReader):\n raise ValueError('input_reader_config not of type '\n 'input_reader_pb2.InputReader.')\n\n if input_reader_config.WhichOneof('input_reader') == 'tf_record_input_reader':\n config = input_reader_config.tf_record_input_reader\n if not config.input_path:\n raise ValueError('At least one input path must be specified in '\n '`input_reader_config`.')\n\n label_map_proto_file = None\n if input_reader_config.HasField('label_map_path'):\n label_map_proto_file = input_reader_config.label_map_path\n decoder = tf_example_decoder.TfExampleDecoder(\n load_instance_masks=input_reader_config.load_instance_masks,\n instance_mask_type=input_reader_config.mask_type,\n label_map_proto_file=label_map_proto_file,\n use_display_name=input_reader_config.use_display_name,\n num_additional_channels=input_reader_config.num_additional_channels)\n\n def process_fn(value):\n \"\"\"Sets up tf graph that decodes, transforms and pads input data.\"\"\"\n processed_tensors = decoder.decode(value)\n if transform_input_data_fn is not None:\n processed_tensors = transform_input_data_fn(processed_tensors)\n return processed_tensors\n\n dataset = read_dataset(\n functools.partial(tf.data.TFRecordDataset, buffer_size=8 * 1000 * 1000),\n config.input_path[:], input_reader_config)\n if multi_gpu:\n dataset = dataset.shard(hvd.size(), hvd.rank())\n # TODO(rathodv): make batch size a required argument once the old binaries\n # are deleted.\n if batch_size:\n num_parallel_calls = batch_size * input_reader_config.num_parallel_batches\n else:\n num_parallel_calls = input_reader_config.num_parallel_map_calls\n dataset = dataset.map(\n process_fn,\n num_parallel_calls=num_parallel_calls)\n if batch_size:\n dataset = dataset.apply(\n tf.contrib.data.batch_and_drop_remainder(batch_size))\n dataset = dataset.prefetch(input_reader_config.num_prefetch_batches)\n return dataset\n\n raise ValueError('Unsupported input_reader_config.')",
"def convert_single_example(ex_index, example, label_list, max_seq_length,\n tokenizer):\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n input_ids, segment_ids, input_mask = \\\n tokenizer.encode_text(text_a=example.text_a,\n text_b=example.text_b,\n max_seq_length=max_seq_length)\n\n label_id = label_map[example.label]\n\n # here we disable the verbose printing of the data\n if ex_index < 0:\n logging.info(\"*** Example ***\")\n logging.info(\"guid: %s\", example.guid)\n logging.info(\"input_ids: %s\", \" \".join([str(x) for x in input_ids]))\n logging.info(\"input_ids length: %d\", len(input_ids))\n logging.info(\"input_mask: %s\", \" \".join([str(x) for x in input_mask]))\n logging.info(\"segment_ids: %s\", \" \".join([str(x) for x in segment_ids]))\n logging.info(\"label: %s (id = %d)\", example.label, label_id)\n\n feature = InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id)\n return feature",
"def tf_example_parser(example):\n def _get_feature_map():\n \"\"\"Returns data format of the serialized tf record file.\"\"\"\n return {\n # 3 sparse feature with variable length. Use this if you have a\n # variable number or more than 1 feature value per example.\n \"feature_1\":\n tf.io.VarLenFeature(dtype=tf.int64),\n \"feature_2\":\n tf.io.VarLenFeature(dtype=tf.int64),\n \"feature_3\":\n tf.io.VarLenFeature(dtype=tf.int64),\n \"label\":\n tf.io.FixedLenFeature([1], dtype=tf.int64),\n }\n example = tf.io.parse_single_example(example, _get_feature_map())\n return example",
"def get_train_inputs(self, example):\n return example",
"def convert(self, example):\n tf_example = _convert_to_tf_example(example, self.tokenizer, self.rules,\n self.config, self.max_sizes)\n return tf_example",
"def parse_from_example_in_example(serialized,\n list_size=None,\n context_feature_spec=None,\n example_feature_spec=None,\n size_feature_name=None,\n mask_feature_name=None,\n shuffle_examples=False,\n seed=None):\n parser = _ExampleInExampleParser(\n list_size=list_size,\n context_feature_spec=context_feature_spec,\n example_feature_spec=example_feature_spec,\n size_feature_name=size_feature_name,\n mask_feature_name=mask_feature_name,\n shuffle_examples=shuffle_examples,\n seed=seed)\n return parser.parse(serialized)",
"def example_serving_input_fn():\n example_bytestring = tf.placeholder(\n shape=[None],\n dtype=tf.string,\n )\n feature_scalars = tf.parse_example(\n example_bytestring,\n tf.feature_column.make_parse_example_spec(INPUT_COLUMNS)\n )\n features = {\n key: tf.expand_dims(tensor, -1)\n for key, tensor in feature_scalars.iteritems()\n }\n return tf.contrib.learn.InputFnOps(\n features,\n None, # labels\n {'example_proto': example_bytestring}\n )",
"def create_example_test(row, vocab):\n context, utterance = row[:2]\n distractors = row[2:]\n context_len = len(next(vocab._tokenizer([context])))\n utterance_len = len(next(vocab._tokenizer([utterance])))\n context_transformed = transform_sentence(context, vocab)\n utterance_transformed = transform_sentence(utterance, vocab)\n\n # New Example\n example = tf.train.Example()\n example.features.feature[\"context\"].int64_list.value.extend(context_transformed)\n example.features.feature[\"utterance\"].int64_list.value.extend(utterance_transformed)\n example.features.feature[\"context_len\"].int64_list.value.extend([context_len])\n example.features.feature[\"utterance_len\"].int64_list.value.extend([utterance_len])\n\n # Distractor sequences\n for i, distractor in enumerate(distractors):\n dis_key = \"distractor_{}\".format(i)\n dis_len_key = \"distractor_{}_len\".format(i)\n # Distractor Length Feature\n dis_len = len(next(vocab._tokenizer([distractor])))\n example.features.feature[dis_len_key].int64_list.value.extend([dis_len])\n # Distractor Text Feature\n dis_transformed = transform_sentence(distractor, vocab)\n example.features.feature[dis_key].int64_list.value.extend(dis_transformed)\n return example",
"def parse_serialized_simulation_example(example_proto, metadata):\n if 'context_mean' in metadata:\n feature_description = _FEATURE_DESCRIPTION_WITH_GLOBAL_CONTEXT\n else:\n feature_description = _FEATURE_DESCRIPTION\n context, parsed_features = tf.io.parse_single_sequence_example(\n example_proto,\n context_features=_CONTEXT_FEATURES,\n sequence_features=feature_description)\n for feature_key, item in parsed_features.items():\n convert_fn = functools.partial(\n convert_to_tensor, encoded_dtype=_FEATURE_DTYPES[feature_key]['in'])\n parsed_features[feature_key] = tf.py_function(\n convert_fn, inp=[item.values], Tout=_FEATURE_DTYPES[feature_key]['out'])\n\n # There is an extra frame at the beginning so we can calculate pos change\n # for all frames used in the paper.\n position_shape = [metadata['sequence_length'] + 1, -1, metadata['dim']]\n\n # Reshape positions to correct dim:\n parsed_features['position'] = tf.reshape(parsed_features['position'],\n position_shape)\n # Set correct shapes of the remaining tensors.\n sequence_length = metadata['sequence_length'] + 1\n if 'context_mean' in metadata:\n context_feat_len = len(metadata['context_mean'])\n parsed_features['step_context'] = tf.reshape(\n parsed_features['step_context'],\n [sequence_length, context_feat_len])\n # Decode particle type explicitly\n context['particle_type'] = tf.py_function(\n functools.partial(convert_fn, encoded_dtype=np.int64),\n inp=[context['particle_type'].values],\n Tout=[tf.int64])\n context['particle_type'] = tf.reshape(context['particle_type'], [-1])\n return context, parsed_features",
"def call(self, inputs: Dict[str, tf.Tensor]) -> tf.Tensor:\n dense_features = inputs[\"dense_features\"]\n sparse_features = inputs[\"sparse_features\"]\n\n sparse_embeddings = self._embedding_layer(sparse_features)\n # Combine a dictionary to a vector and squeeze dimension from\n # (batch_size, 1, emb) to (batch_size, emb).\n sparse_embeddings = tf.nest.flatten(sparse_embeddings)\n\n sparse_embedding_vecs = [\n tf.squeeze(sparse_embedding) for sparse_embedding in sparse_embeddings\n ]\n dense_embedding_vec = self._bottom_stack(dense_features)\n\n interaction_args = sparse_embedding_vecs + [dense_embedding_vec]\n interaction_output = self._feature_interaction(interaction_args)\n feature_interaction_output = tf.concat(\n [dense_embedding_vec, interaction_output], axis=1)\n\n prediction = self._top_stack(feature_interaction_output)\n\n return tf.reshape(prediction, [-1])",
"def make_example(self, tensor_dict):\n image = tensor_dict['image']\n image = image.tobytes()\n label = tensor_dict['label']\n return tf.train.Example(\n features=tf.train.Features(\n feature={\n 'image':\n tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])),\n 'label':\n tf.train.Feature(int64_list=tf.train.Int64List(value=[label]))\n }))",
"def parse_from_tf_example(serialized,\n context_feature_spec=None,\n example_feature_spec=None,\n size_feature_name=None,\n mask_feature_name=None):\n batch_size = tf.shape(serialized)[0]\n features = tf.io.parse_example(\n serialized, features={\n **context_feature_spec,\n **example_feature_spec\n })\n for feature_key, feature_type in example_feature_spec.items():\n if isinstance(feature_type, tf.io.RaggedFeature):\n features[feature_key] = tf.expand_dims(features[feature_key], axis=1)\n else:\n # feature is either a Tensor or SparseTensor.\n features[feature_key] = utils.reshape_first_ndims(features[feature_key],\n 1, [batch_size, 1])\n if size_feature_name is not None:\n # Populate features with a size feature of value 1, corresponding to only\n # one example per list.\n features[size_feature_name] = tf.ones(shape=[batch_size])\n if mask_feature_name:\n features[mask_feature_name] = tf.sequence_mask(tf.ones(shape=[batch_size]))\n return features",
"def convert_single_example(ex_index, example, label_list, max_seq_length,\n tokenizer):\n\n if isinstance(example, PaddingInputExample):\n return InputFeatures(\n input_ids=[0] * max_seq_length,\n input_mask=[0] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n label_id=0,\n is_real_example=False)\n\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label_id = label_map[example.label]\n if ex_index < 5:\n print(\"*** Example ***\")\n print(\"guid: %s\" % (example.guid))\n print(\"tokens: %s\" % \" \".join([str(x) for x in tokens]))\n print(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n print(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n print(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n print(\"label: %s (id = %d)\" % (example.label, label_id))\n\n feature = InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id,\n is_real_example=True)\n return feature",
"def convert_single_example(ex_index, example, label_list, max_seq_length,\n tokenizer):\n\n if isinstance(example, PaddingInputExample):\n return InputFeatures(\n input_ids=[0] * max_seq_length,\n input_mask=[0] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n label_id=0,\n is_real_example=False)\n\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label_id = label_map[example.label]\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"guid: %s\" % (example.guid))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n tf.logging.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n feature = InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id,\n is_real_example=True)\n return feature",
"def parse_example(example):\n features = {\n 'input_ids': tf.io.VarLenFeature(tf.int64),\n 'label_ids': tf.io.VarLenFeature(tf.int64)\n }\n\n parsed_example = \\\n tf.io.parse_single_example(\n example, features=features)\n\n return {\n k: tf.sparse.to_dense(v) for k, v in\n parsed_example.items()\n }"
] | [
"0.6588792",
"0.5950223",
"0.58684516",
"0.5803522",
"0.5487936",
"0.5469294",
"0.5404084",
"0.5355273",
"0.527753",
"0.51956975",
"0.5192631",
"0.51771414",
"0.516648",
"0.51630205",
"0.51613873",
"0.51525664",
"0.5150702",
"0.5136441",
"0.51273745",
"0.5113387",
"0.511311",
"0.5080902",
"0.5075967",
"0.5065289",
"0.50422794",
"0.5037411",
"0.5014086",
"0.5007562",
"0.5007562",
"0.4947337"
] | 0.70847815 | 0 |
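A small decoding sketch for the encoder above, assuming np and dc_constants are importable: because the subread matrix is serialized with data.tostring() and its shape is stored alongside it, np.frombuffer plus a reshape recovers the array.

# Hypothetical inverse of the 'subreads/encoded' feature written above.
flat = np.frombuffer(get_encoded_subreads_from_example(example),
                     dtype=dc_constants.NP_DATA_TYPE)
shape = list(example.features.feature['subreads/shape'].int64_list.value)
subreads = flat.reshape(shape)  # (example_height, example_width, 1)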
Add external padding to bases, PW, IP, and cigar. | def pad_bases_pw_ip_cigar(read: deepconsensus_pb2.Subread,
padded_len: int) -> None:
pad_amt = padded_len - len(read.bases)
if pad_amt > 0:
str_padding = dc_constants.GAP_OR_PAD * pad_amt
list_padding = [dc_constants.GAP_OR_PAD_INT] * pad_amt
read.bases = read.bases + str_padding
read.pw[:] = list(read.pw) + list_padding
read.ip[:] = list(read.ip) + list_padding
read.expanded_cigar = read.expanded_cigar + str_padding | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _add_padding(input_str):\r\n padding_len = AES.block_size - len(input_str) % AES.block_size\r\n return input_str + padding_len * chr(padding_len)",
"def add_padding(im, pad):\n\n return np.pad(im, pad_width=((pad, pad), (pad, pad), (0, 0)), mode='symmetric')",
"def pad(data):\r\n bytes_to_pad = AES.block_size - len(data) % AES.block_size\r\n return data + (bytes_to_pad * chr(bytes_to_pad))",
"def __pad(self, data):\n return data + (AES.block_size - len(data) % AES.block_size) * \\\n chr(AES.block_size - len(data) % AES.block_size)",
"def __Pad(self, data):\n pad = self.block_size - len(data) % self.block_size\n return data + pad * chr(pad)",
"def add_padding(self):\n pad_len = 8 - (len(self.text) % 8)\n self.text += pad_len * chr(pad_len)",
"def pad_with_buffer(b: bytes, pad: bytes) -> bytes:\n assert b\n assert pad\n\n b += pad\n b = pkcs_7(b, 16)\n\n return b",
"def pad(self) -> dict:\n raise NotImplementedError",
"def pad_pw_ip(subreads: List[reads_pb2.Read], max_length: int) -> None:\n for read in subreads:\n pw = struct_utils.get_int_field(read.info, 'pw')\n ip = struct_utils.get_int_field(read.info, 'ip')\n assert len(pw) == len(ip)\n pad_length = max_length - len(pw)\n pw_ip_padding = [dc_constants.GAP_OR_PAD_INT] * pad_length\n struct_utils.set_int_field(read.info, 'pw', pw + pw_ip_padding)\n struct_utils.set_int_field(read.info, 'ip', ip + pw_ip_padding)",
"def pad_encoded_text(self, encoded_text):\n\n\t\textra_padding = 8 - len(encoded_text) % 8#calculmaos cuanto falta por agregar\n\t\tfor i in range(extra_padding):\n\t\t\tencoded_text += \"0\"\n\n\t\tpadded_info = \"{0:08b}\".format(extra_padding)#le agregamos una informacion adicionar la cual utilizaremos despues al comprimir para saber cuantos 0 le agregamos y despues poder eliminarlos\n\t\tencoded_text = padded_info + encoded_text\n\t\treturn encoded_text",
"def pkcs5_pad(self,s):\n return s + (self.BLOCK_SIZE - len(s) % self.BLOCK_SIZE) * chr(self.BLOCK_SIZE - len(s) % self.BLOCK_SIZE)",
"def _add_padding(self, instance):\n bit_length = (len(hex(instance)) - 2) * 4\n desired_padding_size = self.desired_instance_bits - bit_length\n padding = (2 ** desired_padding_size) - 1\n return self._append_hex(padding, instance)",
"def pad_base64_str(str):\n missing_padding = len(str) % 4\n if missing_padding != 0:\n str += '=' * (4 - missing_padding)\n return str",
"def _compute_causal_padding(self):\n left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)\n\n if self.data_format == 'channels_last':\n if self.rank == 1:\n causal_padding = [[0, 0], [left_pad, 0], [0, 0]]\n elif self.rank == 2:\n causal_padding = [[0, 0], [left_pad, 0], [0, 0], [0, 0]]\n elif self.rank == 3:\n causal_padding = [[0, 0], [left_pad, 0], [0, 0], [0, 0], [0, 0]]\n else:\n raise ValueError()\n return causal_padding\n else:\n raise ValueError('No support for NCHW yet')",
"def pkcsPadding():\n test_data = [(20, 'This is a Saturday'),(16, 'NO PAIN NO GAIN!')]\n\n for padlength,data in test_data:\n print padlength, repr(data), repr(pkcs7_pad(padlength, data))",
"def _derive_padding_crypto(self, seed, pad_string): # XXX consider secret_seed\n secret = self.mac(pad_string,\n seed,\n self.shared_secret)\n return aes.AES_CTR_128(secret[:KEYLEN], secret[KEYLEN:])",
"def pkcs7_pad(blocklength, text):\n padlen = blocklength - len(text) % blocklength\n return text + chr(padlen) * padlen",
"def padding(self, data):\n\n\t\tif len(data) > 64:\n\t\t\traise error(\"Data exceeds packet length\")\n\n\t\tdata = data + \"@\"\n\t\twhile len(data) < 64:\n\t\t\tdata = data + \"0\"\n\t\t\n\t\treturn data",
"def add_padding(img, x_padding):\n w = img.shape[1] + x_padding * 2\n img_with_padding = np.zeros((img.shape[0], w, 3), dtype=img.dtype)\n img_with_padding[:, x_padding:img.shape[1] + x_padding] = img\n return img_with_padding",
"def _Pad(self, data):\n pad = self.block_size - len(data) % self.block_size\n return data + util.RepeatByte(pad, pad)",
"def _dynamic_padding(self, batch_data, pad_id = 0 ):\n #print 'dynamic _padding...'\n #print 'pad_id' + str(pad_id)\n max_p_len = 1000\n max_q_len =1000\n pad_p_len = min(max_p_len, max(batch_data['passage_length']))+1\n #print 'pad_p_len' + str(pad_p_len)\n pad_q_len = min(max_q_len, max(batch_data['question_length']))\n #print 'pad_q_len' + str(pad_q_len)\n #for ids in batch_data['passage_token_ids'] :\n #print 'padding: '\n #print (ids + [pad_id] * (pad_p_len - len(ids)))[: pad_p_len]\n batch_data['passage_token_ids'] = [(ids + [pad_id] * (pad_p_len - len(ids)))[: pad_p_len]\n for ids in batch_data['passage_token_ids']]\n batch_data['question_token_ids'] = [(ids + [pad_id] * (pad_q_len - len(ids)))[: pad_q_len]\n for ids in batch_data['question_token_ids']]\n return batch_data, pad_p_len, pad_q_len",
"def run_padding(self):\n\n image_padded, mask, self.pad_to_right, self.pad_to_bottom = gen_padded_image_and_mask (os.path.join('utils_dfn/temp', self.file_name_with_ext),\n self.new_height, self.new_width)\n cv2.imwrite(os.path.join('utils_dfn/img', self.file_name + '_padded_resized.png'), image_padded)\n cv2.imwrite(os.path.join('utils_dfn/mask', self.file_name + '_mask.png'), mask)",
"def pad_upper(self, data, options, padding):\n # data, options = nrrd.read(input_file_name)\n rows, columns, depths = data.shape\n\n # numpy.fill\n for i in range(padding):\n padding_layer = [[self.AIR] * columns for j in range(rows)]\n data = self.concatenate_layers(data, padding_layer)\n\n options['sizes'][2] += padding # update depths\n return (data, options)",
"def pad(s):\n\ttry:\n\t\tfrom Cryptodome.Cipher import AES\n\texcept ImportError:\n\t\tfrom Crypto.Cipher import AES\n\treturn s + b\"\\0\" * (AES.block_size - len(s) % AES.block_size)",
"def insert_padding(img, pad_h, pad_w):\n global frame_height, frame_width\n padding_3_dims = ((pad_h, pad_h), (pad_w, pad_w), (0, 0))\n # apply padding in the above dimensions with values 0\n padded_img = numpy.pad(img, padding_3_dims, 'constant', constant_values=0)\n return padded_img",
"def add_padding(text1: str) -> str:\n\n pad_len = 8 - (len(text1) % 8)\n return text1 + (pad_len * '\\0')",
"def pad(msg):\n return msg + (BLOCK_SIZE - len(msg)) * PADDING",
"def padding(message):\n\n # Convert the string to bits by calling the tobits function\n mbits = tobits(message)\n # Get the length of bits\n length = len(mbits)\n # Calculate the strengthening vector length\n strengthmessage = (bin(int(length))[2:]).zfill(64 * ((len(bin(int(length))[2:]) + 63) // 64))\n\n # Create a padding which starts with 1\n padding = '1'\n # Get the number of zeroes to pad\n get_length = 128 - (length + 64) % 128\n # Run the for loop to append all 0's\n for i in range(0, get_length - 1):\n padding = padding + '0'\n\n # Make the entire pad \n to_add_pad = padding + strengthmessage\n # Return the entire pad\n return to_add_pad",
"def find_padding(known, iter_len=1):\n pad = None\n starting_length = oracle(known)\n for i in range(32):\n test_pad = random_nonb64_string(i)\n padded_length = oracle(known + test_pad)\n if padded_length != starting_length:\n pad = test_pad[:-iter_len]\n break\n return pad",
"def test_pad_b64(self):\n test1 = {\"value\": b\"any carnal pleasure.\",\n \"unpadded\": \"YW55IGNhcm5hbCBwbGVhc3VyZS4\",\n \"padded\": \"YW55IGNhcm5hbCBwbGVhc3VyZS4=\"}\n test2 = {\"value\": b\"any carnal pleasure\",\n \"unpadded\": \"YW55IGNhcm5hbCBwbGVhc3VyZQ\",\n \"padded\": \"YW55IGNhcm5hbCBwbGVhc3VyZQ==\"}\n test3 = {\"value\": b\"any carnal pleasur\",\n \"unpadded\": \"YW55IGNhcm5hbCBwbGVhc3Vy\",\n \"padded\": \"YW55IGNhcm5hbCBwbGVhc3Vy\"}\n\n for test in [test1, test2, test3]:\n padded = oidc._pad_b64(test[\"unpadded\"])\n self.assertEqual(test[\"padded\"], padded)\n value = base64.b64decode(padded)\n self.assertEqual(test[\"value\"], value)"
] | [
"0.6520033",
"0.62590957",
"0.61074096",
"0.6048786",
"0.60357",
"0.6028013",
"0.6009145",
"0.60039216",
"0.59985995",
"0.5989889",
"0.5911762",
"0.5902912",
"0.5881732",
"0.5876407",
"0.58472306",
"0.5836514",
"0.58284765",
"0.58035165",
"0.5787002",
"0.57860774",
"0.5778415",
"0.5736226",
"0.57296336",
"0.57167727",
"0.5711578",
"0.5711396",
"0.57071793",
"0.56877863",
"0.56826526",
"0.5667363"
] | 0.7777972 | 0 |
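An illustrative call for the padding helper above, assuming a Subread proto with the fields the function uses (bases, pw, ip, expanded_cigar) can be constructed from deepconsensus_pb2.

# Hypothetical usage; pads every per-base field out to length 6 in place.
read = deepconsensus_pb2.Subread(
    bases='ACGT', pw=[1, 2, 3, 4], ip=[5, 6, 7, 8], expanded_cigar='MMMM')
pad_bases_pw_ip_cigar(read, padded_len=6)
assert len(read.bases) == len(read.pw) == len(read.ip) == len(read.expanded_cigar) == 6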
Returns the max passes for bases/PW/IP. | def get_max_passes(example_height: int) -> int:
return (example_height - 5) // 4 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def max_pw(self, entity):\n return float(entity['pw_bb'][1])",
"def get_password_count(frm, to, is_adjascent):\n\n num = frm\n\n # make number compatible with #1\n if num < 100000:\n num = 100000\n\n digits = []\n while num > 0:\n digit = num % 10\n digits.append(digit)\n num //= 10\n digits.reverse()\n\n count = 0\n\n while get_num(digits) < to:\n if is_adjascent(digits):\n count += 1\n digits = get_next_pass(digits)\n\n return count",
"def maxTimes(self)->int:\n return self._lic.params['maxAccessTimes'].value",
"def get_max_win_strength(self):\n if self.maxWinStrength is None:\n self.calculate_max_win_strength()\n return self.maxWinStrength",
"def get_max_gains(self):\n return tuple([lib.is_SetHWGainFactor(self.hcam,0x800c+i,100)/100 for i in range(4)])",
"def getMaxTime(self,hashSpeed):\r\n\r\n possiblePasswords = 0\r\n length = self.length\r\n while length > 0:\r\n possiblePasswords += len(self.charset) ** length\r\n length -= 1\r\n seconds = possiblePasswords // hashSpeed\r\n\r\n self.log('Signal','Maximum seconds to run is %ds with a HashSpeed of %d and %d possibilities' % (seconds,hashSpeed,possiblePasswords),'getMaxTime')\r\n\r\n return seconds",
"def part_2(distances: Distances) -> int:\n\n result, _ = max(generate_routes(distances))\n print(f\"part 2: longest route has distance {result}\")\n return result",
"def get_max_turn(self):\n return self.__max_turn",
"def __find_max_distance(self):\n return utils.find_max_distance(self.__game)",
"def fail_max(self) -> int:\n return self._fail_max",
"def count_routes_max_stops(self, src, dest, max_stops):\n\n criteria = lambda stops, distance: stops <= max_stops # inconsistent max, per test cases\n return len(self.routes_with_criteria(src, dest, criteria))",
"def get_total_rows(max_passes: int) -> int:\n # For each of `max_subreads`, we have three pieces of information: bases, PW,\n # and IP. We also have four rows for SN, and one for strand.\n # The information is structured as follows:\n # Bases: (0, params.max_passes - 1) represent bases.\n # PW: rows params.max_passes to (params.max_passes * 2 - 1)\n # IP: rows (params.max_passes * 2) to (params.max_passes * 3 - 1)\n # Strand: rows (params.max_passes * 3) to (params.max_passes * 4)\n # CCS+SN: rows (params.max_passes * 4 + 1) to (params.max_passes * 4 + 5)\n # The last five rows are CCS sequence (1), and SN (4).\n return (max_passes * 4) + 5",
"def findMaxFactor(self):\n factorMax = 0\n factorMaxInd = ''\n for ue in list(self.ues.keys()):\n if len(self.ues[ue].bearers[0].buffer.pckts)>0 and self.ues[ue].pfFactor>factorMax:\n factorMax = self.ues[ue].pfFactor\n factorMaxInd = ue\n if factorMaxInd=='':\n ue = list(self.ues.keys())[self.ind_u]\n q = 0\n while len(self.ues[ue].bearers[0].buffer.pckts)==0 and q<len(self.ues):\n self.updIndUE()\n ue = list(self.ues.keys())[self.ind_u]\n q = q + 1\n factorMaxInd = ue\n\n return factorMaxInd",
"def max_creds(self) -> int:\n return self._max_creds",
"def max_chain_height(self):\n return self.client.call('GET',\n self.name + 'active-peers/max-chain-height')",
"def password_length(self):\n return self.__password_length",
"def password_count(self) -> int:\n return pulumi.get(self, \"password_count\")",
"def countMaxDegree(self):\r\n max_degree = [0, 0] # le sommet, son nombre de connection \r\n for i_node, node_connections in enumerate(self.adjMatrix):\r\n connection = self.n - node_connections.count(0) # on compte le nombre de connections du sommet\r\n if connection > max_degree[1]:\r\n max_degree = max_degree[i_node, node_connections]\r\n return max_degree[0], max_degree[1] # C un tuple ! \r",
"def len_max(self):\n return 16 + 16 + 8 + 8 + Tools.bin_to_dec(self.get_data_size()) + Tools.bin_to_dec(self.get_verification_size())",
"def get_max_seat_id(boarding_passes: list) -> int:\n return max(get_seat_id(boarding_pass) for boarding_pass in boarding_passes)",
"def computeMaxPower(self):\n self.max_power = max(self.getOutputValue(\"RotPwr\"))",
"def numPasses(self):\n if not self.baxH5.hasConsensusBasecalls:\n raise ValueError, \"No CCS reads in this file\"\n return self.baxH5._ccsNumPasses[self.index]",
"def get_password_leaks_count(hashes, hashes_to_check):\n hashes = (line.split(':') for line in hashes.text.splitlines())\n for h, count in hashes:\n if h == hashes_to_check:\n return count\n return 0",
"def maximal_destination_for_passenger(state, problem):\n unsatisfied = [p for p in state.passengers if not (p.is_arrived() or p.onboard)]\n if unsatisfied:\n max_dist = max([p.opt for p in unsatisfied])\n return max_dist\n return 0",
"def pass_attempts(self):\n return self._pass_attempts",
"def password_validity(user):\n # password change information:\n delay = constants.USER_PASS_SPAN # default users\n\n if user.is_staff: # staff/admins\n delay = constants.ADMIN_PASS_SPAN\n\n # default last pass update, join date of the user\n date_joined = user.date_joined\n\n # last change log\n last_change = Password_Change_Log.objects.filter(user=user).aggregate(\n date_max=Max('timestamp'))['date_max']\n\n # if there is record of last password change, use it\n if last_change:\n last_date = last_change\n # or take the join date as default\n else:\n last_date = date_joined\n\n difference = (timezone.now()-last_date).days\n return max(0, delay-difference)",
"def getMaxPower(self):\n return self.max_power",
"def __len__(self):\n return self.cli.passwords.len()",
"def fieldBaseStrength(self):\n return self.params['fieldStrength']",
"def getmaxnumanz(self): # 3\n res,resargs = self.__obj.getmaxnumanz64()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _maxnumanz_return_value = resargs\n return _maxnumanz_return_value"
] | [
"0.6741339",
"0.60329866",
"0.58437866",
"0.58013153",
"0.5779518",
"0.57709515",
"0.57115567",
"0.56688434",
"0.5657231",
"0.5628787",
"0.5628583",
"0.5627118",
"0.5619595",
"0.56186515",
"0.56177336",
"0.55893004",
"0.55542743",
"0.5524528",
"0.55046046",
"0.5483269",
"0.54557925",
"0.54483557",
"0.54241693",
"0.54127747",
"0.538689",
"0.53867507",
"0.53788406",
"0.53721195",
"0.5361437",
"0.53515816"
] | 0.6484286 | 1 |
How many IPs are allowed by the blacklist? >>> example_blacklist = RangeSet.from_file(data_path(__file__, 'example.txt')) >>> print(example_blacklist) 02, 48 >>> part_2(example_blacklist, total_ips_count=10) | def part_2(ranges: 'RangeSet', total_ips_count: int = 1 << 32) -> int:
allowed_count = total_ips_count - len(ranges)
print(f"part 2: there are total {allowed_count} allowed IPs")
return allowed_count | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_count():\n\n config = configparser.ConfigParser()\n config.read('config.ini')\n\n while True:\n try:\n for user in get_count_request():\n ip, count, protocol = str(user[0][0]), user[1][0], str(user[2][0])\n if count >= int(config[protocol]['Count Request']) and ip not in BLACK_LIST:\n BLACK_LIST.append(ip)\n logging.warning(ip)\n\n except Exception as e:\n logging.debug(e)",
"def part_1(ranges: 'RangeSet') -> int:\n\n first_allowed = ranges.ranges[0].vmax + 1\n print(f\"part 1: first allowed IP address is {first_allowed}\")\n return first_allowed",
"def loadMaxIPlist(self, filename):\r\n #I need to put this in a try/catch block later \r\n \r\n maxIPlist=10\r\n linecount=0 \r\n iplist=[]\r\n with open(filename, 'r') as infile:\r\n element = infile.readline()\r\n while element:\r\n \r\n linecount +=1\r\n if linecount < maxIPlist:\r\n iplist.append(element)\r\n element = infile.readline()\r\n \r\n self.objdict['IPADDRESS']=iplist\r\n print(\"Loaded \", linecount, \" ip addresses\")\r\n\r\n return(linecount)",
"def bpCount(file):\n amount_bp = len(file)\n return amount_bp",
"def available_ip_address_count(self) -> int:\n return pulumi.get(self, \"available_ip_address_count\")",
"def n_available_tasks(app_id, user_id=None, user_ip=None):\r\n\r\n if user_id and not user_ip:\r\n query = text('''SELECT COUNT(id) AS n_tasks FROM task WHERE NOT EXISTS\r\n (SELECT task_id FROM task_run WHERE\r\n app_id=:app_id AND user_id=:user_id AND task_id=task.id)\r\n AND app_id=:app_id AND state !='completed';''')\r\n result = db.engine.execute(query, app_id=app_id, user_id=user_id)\r\n else:\r\n if not user_ip:\r\n user_ip = '127.0.0.1'\r\n query = text('''SELECT COUNT(id) AS n_tasks FROM task WHERE NOT EXISTS\r\n (SELECT task_id FROM task_run WHERE\r\n app_id=:app_id AND user_ip=:user_ip AND task_id=task.id)\r\n AND app_id=:app_id AND state !='completed';''')\r\n result = db.engine.execute(query, app_id=app_id, user_ip=user_ip)\r\n n_tasks = 0\r\n for row in result:\r\n n_tasks = row.n_tasks\r\n return n_tasks",
"def get_num_bags():\n with open('adventofcode/twentytwenty/static_data/day7.txt', 'r') as f:\n lines = f.readlines()\n\n rules, bags = format_data(lines)\n\n total = 0\n included_bags, num_bags = run_recursion([BAG_TYPE], bags, rules, total)\n # print(included_bags)\n\n return num_bags",
"def secondary_private_ip_address_count(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"secondary_private_ip_address_count\")",
"def get_count_rem_local_ips(self):\n return len(self.remaining_local_ips)",
"def part_two(rucksacks: list) -> int:\n summ = 0\n for i in range(0, len(rucksacks), 3):\n first_group = set(rucksacks[i])\n second_group = set(rucksacks[i + 1])\n third_group = set(rucksacks[i + 2])\n badge = first_group.intersection(second_group).intersection(third_group)\n badge = list(badge)[0] # extract item id from set\n summ += PRIORITY.get(badge, 0)\n return summ",
"def get_num_of_baskets(self):\n return self.num_of_baskets",
"def filter_ipnet_range_size(network_cidr, range_start, range_end):\n try:\n network_cidr_str = unicode(network_cidr)\n range_start_str = unicode(range_start)\n range_end_str = unicode(range_end)\n except NameError as ex:\n network_cidr_str = str(network_cidr)\n range_start_str = str(range_start)\n range_end_str = str(range_end)\n try:\n ipnet = IPv4Network(network_cidr_str)\n ip1 = IPv4Address(range_start_str)\n ip2 = IPv4Address(range_end_str)\n\n if ip1 in ipnet and ip2 in ipnet:\n index1 = list(ipnet.hosts()).index(ip1)\n index2 = list(ipnet.hosts()).index(ip2)\n ip_range_size = index2 - index1 + 1\n return ip_range_size\n else:\n raise ValueError\n except ValueError as ex:\n logging.error(range_start_str + \" and \" + range_end_str +\n \" are not valid IP addresses for range inside \" +\n network_cidr_str)\n raise",
"def calculate_subnets(total, breakdown):\n sanity_percent = 0 # if this isn't 100% by the end, we got issues.\n subnets = 0\n for nodep, netp in breakdown:\n sanity_percent += nodep\n if (sanity_percent > 100):\n return -1\n subtotal = int(total * .01 * nodep)\n groupby = int(254 * .01 *netp)\n subnets += math.ceil(subtotal/groupby)\n if (sanity_percent < 100):\n return -1\n return subnets",
"def number_of_trips(filename): \r\n \r\n with open(filename, 'r') as f_in:\r\n # set up csv reader object\r\n trip_reader = csv.DictReader(f_in)\r\n \r\n # initialize count variables\r\n n_subscribers = 0\r\n n_customers = 0\r\n \r\n # tally up ride types\r\n for row in trip_reader:\r\n if row['user_type'] == 'Subscriber':\r\n n_subscribers += 1\r\n else:\r\n n_customers += 1\r\n \r\n # compute total number of rides\r\n n_total = n_subscribers + n_customers\r\n \r\n # return tallies as a tuple\r\n return(n_subscribers, n_customers, n_total)",
"def generate_ips(num, prefix, exclude_ips):\n prefix = IPNetwork(prefix)\n exclude_ips.append(prefix.broadcast)\n exclude_ips.append(prefix.network)\n available_ips = list(prefix)\n\n if len(available_ips) - len(exclude_ips)< num:\n raise Exception(\"Not enough available IPs\")\n\n generated_ips = []\n for available_ip in available_ips:\n if available_ip not in exclude_ips:\n generated_ips.append(IPNetwork(str(available_ip) + '/' + str(prefix.prefixlen)))\n if len(generated_ips) == num:\n break\n\n return generated_ips",
"def linesCountingAux(file_name, nProcesses):\r\n\r\n linesPerProcessesList = []\r\n\r\n with open(file_name, \"r\") as file:\r\n lineCounting = 0\r\n\r\n for line in file:\r\n lineCounting += 1 #discover the lines in the text file\r\n\r\n linesPerProcesses = lineCounting // nProcesses\r\n\r\n for number in range(nProcesses):\r\n linesPerProcessesList.append(linesPerProcesses)\r\n if sum(linesPerProcessesList) < lineCounting:\r\n for number in range (lineCounting - sum(linesPerProcessesList)):\r\n linesPerProcessesList[number] += 1\r\n\r\n return linesPerProcessesList",
"def getBaseIP(url: str) -> list:\n \n response = requests.get(url) #get data \n\n ip_sets = response.text\n ip_list = re.findall(r'(?:\\d{1,3}\\.)+(?:\\d{1,3})', ip_sets)\n \n return ip_list",
"def add_available_ipaddresses(prefix, ipaddress_list, is_pool=False):\n\n output = []\n prev_ip = None\n\n # Ignore the network and broadcast addresses for non-pool IPv4 prefixes larger than /31.\n if prefix.version == 4 and prefix.prefixlen < 31 and not is_pool:\n first_ip_in_prefix = netaddr.IPAddress(prefix.first + 1)\n last_ip_in_prefix = netaddr.IPAddress(prefix.last - 1)\n else:\n first_ip_in_prefix = netaddr.IPAddress(prefix.first)\n last_ip_in_prefix = netaddr.IPAddress(prefix.last)\n\n if not ipaddress_list:\n return [(\n int(last_ip_in_prefix - first_ip_in_prefix + 1),\n '{}/{}'.format(first_ip_in_prefix, prefix.prefixlen)\n )]\n\n # Account for any available IPs before the first real IP\n if ipaddress_list[0].address.ip > first_ip_in_prefix:\n skipped_count = int(ipaddress_list[0].address.ip - first_ip_in_prefix)\n first_skipped = '{}/{}'.format(first_ip_in_prefix, prefix.prefixlen)\n output.append((skipped_count, first_skipped))\n\n # Iterate through existing IPs and annotate free ranges\n for ip in ipaddress_list:\n if prev_ip:\n diff = int(ip.address.ip - prev_ip.address.ip)\n if diff > 1:\n first_skipped = '{}/{}'.format(prev_ip.address.ip + 1, prefix.prefixlen)\n output.append((diff - 1, first_skipped))\n output.append(ip)\n prev_ip = ip\n\n # Include any remaining available IPs\n if prev_ip.address.ip < last_ip_in_prefix:\n skipped_count = int(last_ip_in_prefix - prev_ip.address.ip)\n first_skipped = '{}/{}'.format(prev_ip.address.ip + 1, prefix.prefixlen)\n output.append((skipped_count, first_skipped))\n\n return output",
"def test_amount_of_listings_many_listings(self):\n listings = steam_market.get_total_amount_of_listings(soup=get_soup_from_path(TEST_FILE_MANY_RESULTS))\n self.assertEqual(164720, listings)",
"def get_total_num_clients(task):\n if task == 'stackoverflow_lr':\n return 342477\n else:\n raise ValueError(f'Unsupported task: {task}')",
"def num_partitions(self): # -> int:\n ...",
"def _count_occupied_seats(grid: List[List[str]]) -> int:\n total = 0\n for row in grid:\n total += row.count('#')\n return total",
"def retrieve_num_instances(service):\n instance_counts = service[\"instance-counts\"]\n return instance_counts[\"healthy-instances\"] + instance_counts[\"unhealthy-instances\"]",
"def NSK129(userloads):\n nsk129 = NSK129_user_ids()\n total = userloads.read(nsk129[0])\n for user_id in nsk129[1:]:\n total += userloads.read(user_id)\n return total",
"def getnumbarcblocktriplets(self): # 3\n res,resargs = self.__obj.getnumbarcblocktriplets()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _num_return_value = resargs\n return _num_return_value",
"def test_insufficient_space(self):\n\n self._test_find_next_subnet(\n network=\"10.0.0.0/24\",\n subnets=[\"10.0.0.64/25\"],\n requests=[25],\n expected=None,\n )",
"def part2(fname: dict) -> int:\n return sum(len(set.intersection(*[set(pax) for pax in group])) for group in get_data(fname))",
"def shared_nb(self):\n return self.bbsitting_set.count() + self.booked.count()",
"def num_black_neighbors(tile, tiles):\n return sum([tiles[add(tile, step)] for step in NEIGHBORS])",
"def Count(self, limit=None):\n if limit is None:\n count = 0\n for i in self.Run():\n count += 1\n return count\n else:\n return len(self.Get(limit))"
] | [
"0.57934356",
"0.5705135",
"0.56226957",
"0.5578852",
"0.5336283",
"0.5191611",
"0.5136622",
"0.51027375",
"0.50659984",
"0.5059147",
"0.50535524",
"0.49664873",
"0.49608132",
"0.49188912",
"0.4906973",
"0.48834145",
"0.4876514",
"0.48726276",
"0.48689985",
"0.48537868",
"0.48473972",
"0.48097643",
"0.48009944",
"0.47964096",
"0.47930187",
"0.47830328",
"0.47786176",
"0.47748327",
"0.4758728",
"0.47352958"
] | 0.80164754 | 0 |
Logical disjunction of two ranges. Contains items present in either. However if the two ranges are disjunct (no common items), `None` is returned. >>> Range(10, 20) | Range(1, 3) >>> Range(10, 20) | Range(1, 9) Range(1, 20) >>> Range(10, 20) | Range(1, 10) Range(1, 20) >>> Range(10, 20) | Range(1, 14) Range(1, 20) >>> Range(10, 20) | Range(1, 20) Range(1, 20) >>> Range(10, 20) | Range(1, 25) Range(1, 25) >>> Range(10, 20) | Range(9, 9) Range(9, 20) >>> Range(10, 20) | Range(9, 15) Range(9, 20) >>> Range(10, 20) | Range(9, 22) Range(9, 22) >>> Range(10, 20) | Range(10, 10) Range(10, 20) >>> Range(10, 20) | Range(10, 20) Range(10, 20) >>> Range(10, 20) | Range(10, 22) Range(10, 22) >>> Range(10, 20) | Range(14, 17) Range(10, 20) >>> Range(10, 20) | Range(14, 20) Range(10, 20) >>> Range(10, 20) | Range(14, 23) Range(10, 23) >>> Range(10, 20) | Range(20, 21) Range(10, 21) >>> Range(10, 20) | Range(21, 21) Range(10, 21) >>> Range(10, 20) | Range(21, 24) Range(10, 24) >>> Range(10, 20) | Range(22, 22) >>> Range(10, 20) | Range(22, 24) | def __or__(self, other):
if not isinstance(other, Range):
raise TypeError(
f"unsupported operand types for |: "
f"{type(self).__name__!r} and {type(other).__name__!r}"
)
if self == other:
return Range(self.vmin, self.vmax)
elif self.vmax < other.vmin - 1:
return None
elif self.vmin > other.vmax + 1:
return None
return Range(
vmin=min(self.vmin, other.vmin),
vmax=max(self.vmax, other.vmax)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ranges_overlap(start1, end1, start2, end2):\n return start1 <= end2 and end1 >= start2",
"def overlap(start1, end1, start2, end2):\n return not (end1 < start2 or end2 < start1)",
"def __and__(self, other):\n if not isinstance(other, Range):\n raise TypeError(\n f\"unsupported operand types for &: \"\n f\"{type(self).__name__!r} and {type(other).__name__!r}\"\n )\n\n if self == other:\n return Range(self.vmin, self.vmax)\n elif self < other or self > other:\n return None\n\n return Range(\n vmin=max(self.vmin, other.vmin),\n vmax=min(self.vmax, other.vmax)\n )",
"def isdisjoint(self, other: Union[Rangelike, Iterable[Rangelike]]) -> bool:\n # convert to RangeSet\n other = RangeSet._to_rangeset(other)\n # O(n^2) comparison\n # TODO improve efficiency by mergesort/short-circuiting\n return all(rng1.isdisjoint(rng2) for rng1 in self._ranges for rng2 in other._ranges)",
"def range_overlap(range1, range2):\n return range(max(range1[0], range2[0]), min(range1[1], range2[1]))",
"def overlapping_ranges(\n ranges_1: Sequence[Tuple[int, int]],\n ranges_2: Sequence[Tuple[int, int]],\n) -> List[Tuple[int, int]]:\n return [\n (max(first[0], second[0]), min(first[1], second[1]))\n for first in ranges_1\n for second in ranges_2\n if max(first[0], second[0]) < min(first[1], second[1])\n ]",
"def overlap(range1, range2):\n if range1[0] <= range2[1] and range2[0] <= range1[1]:\n return True\n return False",
"def overlap(t1start, t1end, t2start, t2end):\n\n return (t1start <= t2start <= t1end) or (t2start <= t1start <= t2end)",
"def range_matches(self, other):\n return (\n self.begin == other.begin and \n self.end == other.end\n )",
"def _range_overapped(self, x, y):\n xs = set( range(x[0], x[1]))\n ys = set( range(y[0], y[1]))\n return xs.intersection(ys)",
"def overlaps(self, other):\n return _binary_op(arctern.ST_Overlaps, self, other).astype(bool, copy=False)",
"def range_union(ranges):\n union = []\n for r in sorted(ranges, key=lambda r: r.start):\n if len(union) > 0 and union[-1].stop >= r.start:\n union[-1] = range(union[-1].start, max(union[-1].stop, r.stop))\n else:\n union.append(r)\n return union",
"def do_overlap(r1, r2):\n r1_s, r1_e = r1\n r2_s, r2_e = r2\n\n return r1_s <= r2_s <= r1_e or r2_s <= r1_s <= r2_e",
"def overlaps(self, other):\n return self.start <= other.end and self.end >= other.start",
"def dates_intervals_are_overlapped(start_1, end_1, start_2, end_2):\n return end_1 >= start_2 and end_2 >= start_1",
"def overlaps(self, right: GeoSpatialValue) -> ir.BooleanValue:\n return ops.GeoOverlaps(self, right).to_expr()",
"def range_overlap(ranges):\n max_left = 0.0\n min_right = 1.0\n for (left, right) in ranges:\n max_left = max(max_left, left)\n min_right = min(min_right, right)\n return (max_left, min_right)",
"def overlap(a, b):\n return not(a[2]<=b[0] or a[3]<=b[1] or a[0]>=b[2] or a[1]>=b[3])",
"def _op_or_(self, left: Any, right: Any) -> Any:\n if isinstance(left, list):\n return Collection(left, right)\n\n left, right = _recycle_left_right(left, right)\n left = Series(left).fillna(False)\n right = Series(right).fillna(False)\n return left | right",
"def symmetric_difference(self, rng_set: Union[Rangelike, Iterable[Rangelike]]) -> 'RangeSet':\n # convert to a RangeSet\n rng_set = RangeSet._to_rangeset(rng_set)\n # get union and then remove intersections\n union = self.union(rng_set)\n intersection = self.intersection(rng_set)\n union.difference_update(intersection)\n return union",
"def _range_contains(self, a, b):\n\t\treturn b[0] >= a[0] and b[-1] <= a[-1]",
"def overlap(start_idx1, end_idx1, start_idx2, end_idx2):\n head = min(end_idx1, end_idx2)\n tail = max(start_idx1, start_idx2)\n return head >= tail",
"def __or__(self, other):\n\n union = list(self)\n union.extend([value for value in other if value not in union])\n\n return union",
"def overlaps(self, begin, end=None):\n if end is not None:\n # An overlap means that some C exists that is inside both ranges:\n # begin <= C < end\n # and \n # self.begin <= C < self.end\n # See https://stackoverflow.com/questions/3269434/whats-the-most-efficient-way-to-test-two-integer-ranges-for-overlap/3269471#3269471\n return begin < self.end and end > self.begin\n try:\n return self.overlaps(begin.begin, begin.end)\n except:\n return self.contains_point(begin)",
"def get_overlap(self, other):\n return self.intersection_over_union(other)",
"def doesNotOverlap( self, other):\n return not self.overlaps( other)",
"def overlap(self, other):\n\t\toverlap = self.contains(other.startX, other.startY) or \\\n\t\t\tself.contains(other.startX, other.endY) or \\\n\t\t\tself.contains(other.endX, other.startY) or \\\n\t\t\tself.contains(other.endX, other.endY)\n\n\t\tintersectY1 = self.startY <= other.startY <= self.endY and \\\n\t\t\tself.startY <= other.endY <= self.endY and \\\n\t\t\t(other.startX <= self.startX <= other.endX or \\\n\t\t\tother.startX <= self.endX <= other.endX)\n\n\t\tintersectY2 = other.startY <= self.startY <= other.endY and \\\n\t\t\t other.startY <= self.endY <= other.endY and \\\n\t\t\t (self.startX <= other.startX <= self.endX or \\\n\t\t\t self.startX <= other.endX <= self.endX)\n\n\t\tintersectY = intersectY1 or intersectY2\n\n\t\tintersectX1 = self.startX <= other.startX <= self.endY and \\\n\t\t\tself.startX <= other.endX <= self.endX and \\\n\t\t (other.startY <= self.startY <= other.endY or \\\n\t\t\tother.startY <= self.endY <= other.endY)\n\n\t\tintersectX2 = other.startX <= self.startX <= other.endX and \\\n\t\t\tother.startX <= self.endX <= other.endX and \\\n\t\t (self.startY <= other.startY <= self.endY or \\\n\t\t\tself.startY <= other.endY <= self.endY)\n\n\t\tintersectX = intersectX1 or intersectX2\n\n\t\treturn overlap or intersectX or intersectY",
"def _merge_ranges(ranges: Iterable[Range]) -> _LinkedList[Range]:\n # sort our list of ranges, first\n ranges = _LinkedList(sorted(ranges))\n # # determine if we need to do anything\n # if len(ranges) < 2:\n # return\n # try to merge each range with the one after it, up until the end of the list\n node = ranges.first\n while node and node.next:\n prev_range = node.value\n next_range = node.next.value\n new_range = prev_range.union(next_range)\n if new_range is not None: # TODO python 3.8 refactoring - this is a great place for :=\n node.value = new_range\n ranges.pop_after(node)\n else:\n node = node.next\n return ranges",
"def __or__(self, other):\n return self.union(other)",
"def __or__(self, other):\n return self.union(other)"
] | [
"0.66074175",
"0.64702195",
"0.6391564",
"0.63127744",
"0.6272544",
"0.618147",
"0.6166346",
"0.60126203",
"0.598657",
"0.5985955",
"0.58613884",
"0.5857347",
"0.5855965",
"0.58527166",
"0.5840612",
"0.58333844",
"0.5826907",
"0.580981",
"0.57973963",
"0.5791664",
"0.576489",
"0.57532406",
"0.57300454",
"0.5706941",
"0.5685044",
"0.56748474",
"0.566379",
"0.5623072",
"0.5622426",
"0.5622426"
] | 0.6901047 | 0 |
Gets the donor_participant of this AllOfCreateClaimResponseClaim. | def donor_participant(self) -> Object:
return self._donor_participant | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def donor_participant(self, donor_participant: Object):\n\n self._donor_participant = donor_participant",
"def participant(self) -> AllOfCancelClaimRequestParticipant:\n return self._participant",
"def participant(self) -> AllOfAcknowledgeClaimRequestParticipant:\n return self._participant",
"def __init__(self, donor_participant: Object=None, id: str=None, status: str=None, resolution_period_end: datetime=None, completion_period_end: datetime=None, last_modified: datetime=None): # noqa: E501\n self.swagger_types = {\n 'donor_participant': Object,\n 'id': str,\n 'status': str,\n 'resolution_period_end': datetime,\n 'completion_period_end': datetime,\n 'last_modified': datetime\n }\n\n self.attribute_map = {\n 'donor_participant': 'DonorParticipant',\n 'id': 'Id',\n 'status': 'Status',\n 'resolution_period_end': 'ResolutionPeriodEnd',\n 'completion_period_end': 'CompletionPeriodEnd',\n 'last_modified': 'LastModified'\n }\n self._donor_participant = donor_participant\n self._id = id\n self._status = status\n self._resolution_period_end = resolution_period_end\n self._completion_period_end = completion_period_end\n self._last_modified = last_modified",
"def get_donor(self, name):\n return self.donors.get(name, Donor(name))",
"def find_donor(self, existing_donor):\n donor_object = None\n for donor in self.donor_list:\n if existing_donor == donor._full_name:\n donor_object = donor\n break\n return donor_object",
"def get_thank_you(self, donor):\r\n donor_dict = {'name': donor.name, 'donation': donor.donations[-1],\r\n 'num_donations': len(donor.donations)}\r\n donor_dict['multiple'] = 's' if len(donor.donations) > 1 else ''\r\n\r\n thankyou = ('Dear {name}:\\n'\r\n 'Thank you for your generous donation of '\r\n '${donation:.2f}.\\nI really appreciate your '\r\n '{num_donations}\\ndonation{multiple} to our '\r\n 'organization.\\nI assure you that your contributions '\r\n 'will be put to\\ngood use!\\n\\n'\r\n 'Regards,\\nBen').format(**donor_dict)\r\n return thankyou",
"def participant_id(self):\n return self.data[\"id\"]",
"def claim_id(self) -> str:\n return self._claim_id",
"def claim_id(self) -> str:\n return self._claim_id",
"def get_donor_email(self):\n input_name = self.get_donor()\n if input_name in self.all_donors:\n print(self.r.hget(input_name, 'email'))",
"def participant(self, participant: AllOfCancelClaimRequestParticipant):\n if participant is None:\n raise ValueError(\"Invalid value for `participant`, must not be `None`\") # noqa: E501\n\n self._participant = participant",
"def get_person(self, requestId):\n return self.get_json('/verification/%s/person' % str(requestId))",
"def getParticipant(self, discordId):\n if discordId in participants:\n return participants[discordId]\n else:\n return None",
"def post(self):\n dao = ClaimDao()\n return dao.create(api.payload)",
"def patient_consent_date(self):\n return self._patient_consent_date",
"def patient_consent(self):\n return 1 if self.patient_consent_date is not None else 0",
"def created_by(self):\n membership = UnitMembershipFactory(\n unit=self.unit, role=models.UnitMembershipRole.OWNER\n )\n return membership.user",
"def creator(self):\n return self._creator",
"def creator(self):\n return self._creator",
"def gen_donor():\n# <<<<<<< master\n return [donor for donor in donor_data]",
"def organizer(self):\n if \"organizer\" in self._prop_dict:\n if isinstance(self._prop_dict[\"organizer\"], OneDriveObjectBase):\n return self._prop_dict[\"organizer\"]\n else :\n self._prop_dict[\"organizer\"] = Recipient(self._prop_dict[\"organizer\"])\n return self._prop_dict[\"organizer\"]\n\n return None",
"def is_donor(self):\n return True",
"def reason(self) -> ConfirmClaimRequestpropertiesReason:\n return self._reason",
"def generate_email(self):\n email_dict = {'donor_name':self.name,\n 'donation_amount':self.last_donation(),\n 'total_amount':self.total_donations()}\n\n # Create formatted email that can be copied & pasted\n email = ('\\n'.join(['Dear {donor_name},','',\n 'Thank you for your generous donation of ${donation_amount:.2f}.',\n 'To date, you have donated a total of ${total_amount:.2f} to our charity.',\n 'Your contributions help new arrivals receive the highest quality care possible.',\n 'Please know that your donations make a world of difference!',\n '','Sincerely,','The Good Place Team'])).format(**email_dict)\n\n return(email)",
"def send_donor_email(donor):\n msg = Message('New Donor ID', \n sender='[email protected]', \n recipients=[donor.email])\n msg.body = f\"\"\"Thank you, {donor.first_name} for choosing to donate blood!\n Your Donor ID is {donor.id}\n We appreciate your blood!\"\"\"\n try:\n mail.send(msg)\n except Exception as e:\n print(e)",
"def is_participant(self):\n if self.user is None:\n return False\n if unicode(self.user._id) in self.barcamp.event.participants:\n return True\n return False",
"def creator(self) -> packets.CreatorPacket:\n if self._creator_packet is None:\n raise ValueError(\"Packet not found\")\n return self._creator_packet",
"def created_by(self):\n return self._created_by",
"def participant(self, participant: AllOfAcknowledgeClaimRequestParticipant):\n if participant is None:\n raise ValueError(\"Invalid value for `participant`, must not be `None`\") # noqa: E501\n\n self._participant = participant"
] | [
"0.64143723",
"0.62174505",
"0.5898315",
"0.50857437",
"0.49990976",
"0.48347864",
"0.48060545",
"0.46557683",
"0.45774764",
"0.45774764",
"0.45192257",
"0.4476471",
"0.44539085",
"0.4446725",
"0.4414176",
"0.43843213",
"0.43202215",
"0.43045747",
"0.42562267",
"0.42562267",
"0.42482528",
"0.42475334",
"0.42377648",
"0.42297116",
"0.42172498",
"0.4159982",
"0.41395897",
"0.41190144",
"0.4116875",
"0.41166732"
] | 0.7721172 | 0 |
Sets the donor_participant of this AllOfCreateClaimResponseClaim. | def donor_participant(self, donor_participant: Object):
self._donor_participant = donor_participant | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def participant(self, participant: AllOfCancelClaimRequestParticipant):\n if participant is None:\n raise ValueError(\"Invalid value for `participant`, must not be `None`\") # noqa: E501\n\n self._participant = participant",
"def donor_participant(self) -> Object:\n return self._donor_participant",
"def participant(self, participant: AllOfAcknowledgeClaimRequestParticipant):\n if participant is None:\n raise ValueError(\"Invalid value for `participant`, must not be `None`\") # noqa: E501\n\n self._participant = participant",
"def __init__(self, donor_participant: Object=None, id: str=None, status: str=None, resolution_period_end: datetime=None, completion_period_end: datetime=None, last_modified: datetime=None): # noqa: E501\n self.swagger_types = {\n 'donor_participant': Object,\n 'id': str,\n 'status': str,\n 'resolution_period_end': datetime,\n 'completion_period_end': datetime,\n 'last_modified': datetime\n }\n\n self.attribute_map = {\n 'donor_participant': 'DonorParticipant',\n 'id': 'Id',\n 'status': 'Status',\n 'resolution_period_end': 'ResolutionPeriodEnd',\n 'completion_period_end': 'CompletionPeriodEnd',\n 'last_modified': 'LastModified'\n }\n self._donor_participant = donor_participant\n self._id = id\n self._status = status\n self._resolution_period_end = resolution_period_end\n self._completion_period_end = completion_period_end\n self._last_modified = last_modified",
"def conversation_participant_arn(self, conversation_participant_arn):\n\n self._conversation_participant_arn = conversation_participant_arn",
"def conversation_participant_uuid(self, conversation_participant_uuid):\n\n self._conversation_participant_uuid = conversation_participant_uuid",
"def add_donor_object(self, donor_object):\n self.donors.append(donor_object)",
"def participant(self) -> AllOfCancelClaimRequestParticipant:\n return self._participant",
"def set_known_creator(self, target_item, creator_Q, reference):\n creator_item = self.wd.QtoItemPage(creator_Q)\n self.wd.addNewClaim(\n u'P170',\n WD.Statement(creator_item),\n target_item,\n reference)",
"def add_donor(self, new_donor: Donor):\n self.donor_list.append(new_donor)",
"def participant(self) -> AllOfAcknowledgeClaimRequestParticipant:\n return self._participant",
"def get_thank_you(self, donor):\r\n donor_dict = {'name': donor.name, 'donation': donor.donations[-1],\r\n 'num_donations': len(donor.donations)}\r\n donor_dict['multiple'] = 's' if len(donor.donations) > 1 else ''\r\n\r\n thankyou = ('Dear {name}:\\n'\r\n 'Thank you for your generous donation of '\r\n '${donation:.2f}.\\nI really appreciate your '\r\n '{num_donations}\\ndonation{multiple} to our '\r\n 'organization.\\nI assure you that your contributions '\r\n 'will be put to\\ngood use!\\n\\n'\r\n 'Regards,\\nBen').format(**donor_dict)\r\n return thankyou",
"def set_follower(self, follower):\n self.follower = follower",
"def send_donor_email(donor):\n msg = Message('New Donor ID', \n sender='[email protected]', \n recipients=[donor.email])\n msg.body = f\"\"\"Thank you, {donor.first_name} for choosing to donate blood!\n Your Donor ID is {donor.id}\n We appreciate your blood!\"\"\"\n try:\n mail.send(msg)\n except Exception as e:\n print(e)",
"def agent_requirement(self, agent_requirement):\n\n self._agent_requirement = agent_requirement",
"def create_donor(fullname):\n donors[fullname] = []",
"def set_anonymous_creator(self, target_item, related_info, reference):\n anonymous_q = 'Q4233718'\n anon_statement = WD.Statement(self.wd.QtoItemPage(anonymous_q))\n\n # set any related qualifiers\n if related_info:\n anon_statement.addQualifier(\n WD.Qualifier(\n P=related_info['P'],\n itis=related_info['itis']))\n\n # set claim\n self.wd.addNewClaim(\n u'P170',\n anon_statement,\n target_item,\n reference)",
"def perform_create(self, serializer):\n person = self.kwargs['person_guid']\n serializer.validated_data['person'] = Person.objects.get(\n person_guid=person)\n serializer.validated_data['author'] = self.request.user\n return super(PersonNoteListView, self).perform_create(serializer)",
"def patient_consent(self, value):\n self.patient_consent_date = utcnow() if value else None",
"def add_donor_neo(self, new_first_name, new_last_name, new_email, donation_amount):\n\t\twith self.driver.session() as session:\n\t\t\ttransaction_id = new_email + '_1'\n\t\t\tif not self.donor_exists_neo(new_email, session):\n\t\t\t\ttry:\n\t\t\t\t\tfull_name = new_first_name + ' ' + new_last_name\n\t\t\t\t\tcyph = \"CREATE (n:Donor {email:'%s', first_name:'%s',last_name:'%s', full_name:'%s'})\" % (new_email, new_first_name, new_last_name, full_name)\n\t\t\t\t\tsession.run(cyph)\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(f'Error creating = {new_email}')\n\t\t\t\t\tprint(e)\n\t\t\telse:\n\t\t\t\tself.add_donation_neo(new_email, donation_amount)",
"def created_by(self, created_by):\n\n self._created_by = created_by",
"def created_by(self, created_by):\n\n self._created_by = created_by",
"def created_by(self, created_by):\n\n self._created_by = created_by",
"def created_by(self, created_by):\n\n self._created_by = created_by",
"def created_by(self, created_by):\n\n self._created_by = created_by",
"def created_by(self, created_by):\n\n self._created_by = created_by",
"def conversation_participant_name(self, conversation_participant_name):\n\n self._conversation_participant_name = conversation_participant_name",
"def test_create_contributor(self):\n self.assertEqual(\n RoleAssignment.objects.filter(project=self.project).count(), 1\n )\n url = reverse(\n 'projectroles:api_role_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'role': PROJECT_ROLE_CONTRIBUTOR,\n 'user': str(self.assign_user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(\n RoleAssignment.objects.filter(project=self.project).count(), 2\n )\n role_as = RoleAssignment.objects.filter(\n project=self.project,\n role=self.role_contributor,\n user=self.assign_user,\n ).first()\n self.assertIsNotNone(role_as)\n expected = {\n 'project': str(self.project.sodar_uuid),\n 'role': PROJECT_ROLE_CONTRIBUTOR,\n 'user': str(self.assign_user.sodar_uuid),\n 'sodar_uuid': str(role_as.sodar_uuid),\n }\n self.assertEqual(json.loads(response.content), expected)",
"def add_donation(donor, donation_amount, donor_dict):\n donor_dict.setdefault(donor, []).append(donation_amount)",
"def thank_donor(filename, donorname, donation):\n\n try:\n with open(filename, 'w') as fhandle:\n fhandle.write(\"Hello {}! On behalf of our staff here at OMGBBQMMX,\"\n \"I want to thank you for your generous gift\"\n \" of ${:.2f}!\".format(donorname, donation))\n print(\"Email file generated!\")\n\n except IOError:\n raise (\"Unable to write thank you file. Please check\"\n \" permissions.\")"
] | [
"0.60350543",
"0.6025046",
"0.55765235",
"0.5541427",
"0.515288",
"0.4857232",
"0.47399157",
"0.4663719",
"0.4635826",
"0.45958862",
"0.4552072",
"0.44890487",
"0.44712678",
"0.44042704",
"0.43036908",
"0.426201",
"0.42143816",
"0.41979194",
"0.41707268",
"0.41660166",
"0.41602352",
"0.41602352",
"0.41602352",
"0.41602352",
"0.41602352",
"0.41602352",
"0.4156731",
"0.41354102",
"0.41328132",
"0.41209137"
] | 0.7887795 | 0 |
Writes an annotation on file according to the darkflow format. | def write_as_frcnn(annotation: pd.DataFrame,
path_to_annotations: str,
image_id: str):
annotation.to_csv(os.path.join(path_to_annotations, '..', 'annotations.txt'),
header=None,
index=None,
mode='a',
sep=',') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writeAnnotations(b, f, ld, n=0):\n \n if args.format.lower() == 'kitti':\n writeKitty(b, os.path.join(ld, \"%06d.txt\" % n))\n elif args.format.lower() == 'voc':\n writeVOC(b, ld, f)\n elif args.format.lower() == 'darknet':\n writeDarknet(b, os.path.join(ld, \"%06d.txt\" % n))",
"def write_annotation(self, ann_file, img_path, new_img_name):\n if self.type == \"imagenet\":\n label = self.in_annotations[img_path]\n logger.debug(f\"Img {img_path}, imagenet label {label}\")\n ann_file.write(str(label) + \"\\n\")\n elif self.type == \"coco\":\n ann_file.write(\"detection_results {\\n\")\n for obj in self.in_annotations[img_path].keys():\n ann_file.write(\" objects {\\n\")\n ann_file.write(f\" class_id: {self.in_annotations[img_path][obj]['label']}\\n\")\n ann_file.write(\" bounding_box {\\n\")\n ann_file.write(f\" normalized_top: {self.in_annotations[img_path][obj]['normalized_bbox'][0]}\\n\")\n ann_file.write(f\" normalized_bottom: {self.in_annotations[img_path][obj]['normalized_bbox'][1]}\\n\")\n ann_file.write(f\" normalized_left: {self.in_annotations[img_path][obj]['normalized_bbox'][2]}\\n\")\n ann_file.write(f\" normalized_right: {self.in_annotations[img_path][obj]['normalized_bbox'][3]}\\n\")\n ann_file.write(\" }\\n\")\n ann_file.write(\" }\\n\")\n ann_file.write(f' image_name: \"{new_img_name}\"\\n')\n ann_file.write(f' image_id: {int(new_img_name.split(\".\")[0])}\\n')\n ann_file.write(\"}\\n\")",
"def _save_annotation(annotation, filename):\n\n pil_image = Image.fromarray(annotation.astype(dtype=np.uint8))\n '''\n with tf.io.gfile.GFile(filename, mode='w') as f:\n #with open(filename, mode='w') as f:\n print(f)\n pil_image.save(f, 'PNG')\n '''\n pil_image.save(filename)",
"def dump_annotations(self):\n fname = 'annotations'\n if self.split is not None:\n fname = 'annotations_{}'.format(self.split)\n fname = os.path.join(self.dest_folder, '{}.json'.format(fname))\n self.save(self.dataset, fname, \"annotations\")",
"def convert_to_annotation(file, output):\n resource = parse_bel_resource(file)\n\n write_annotation(\n keyword=resource['Namespace']['Keyword'],\n values={k: '' for k in resource['Values']},\n citation_name=resource['Citation']['NameString'],\n description=resource['Namespace']['DescriptionString'],\n file=output,\n )",
"def write_annotations(self, output_file):\n logging.info(self._header)\n np.savetxt(output_file, self._zeroes, header=\" \".join(self._header),fmt='%i',comments='')",
"def convert_to_annotation(file, output):\n from pybel.resources.definitions import write_annotation\n\n resource = parse_bel_resource(file)\n\n write_annotation(\n keyword=resource['Namespace']['Keyword'],\n values={k: '' for k in resource['Values']},\n citation_name=resource['Citation']['NameString'],\n description=resource['Namespace']['DescriptionString'],\n file=output\n )",
"def _write_annotations(self, anns, rec_id, out_handle):\n format_anns = self._format_keyvals(anns)\n if format_anns:\n print repr(format_anns)\n parts = [rec_id, \"annotation\", \"remark\", \".\", \".\", \".\", \".\", \".\",\n format_anns]\n out_handle.write(\"\\t\".join(parts) + \"\\n\")",
"def export_annotation_format(\n input_dir,\n output_dir,\n dataset_format,\n dataset_name,\n project_type=\"Vector\",\n task=\"object_detection\",\n platform=\"Web\",\n):\n\n if isinstance(input_dir, str):\n input_dir = Path(input_dir)\n if isinstance(output_dir, str):\n output_dir = Path(output_dir)\n\n args = Namespace(\n input_dir=input_dir,\n output_dir=output_dir,\n dataset_format=dataset_format,\n dataset_name=dataset_name,\n project_type=project_type,\n task=task,\n platform=platform,\n )\n\n _passes_sanity_checks(args)\n _passes_converter_sanity(args, 'export')\n\n export_from_sa(args)",
"def write_flow(flow, filename):\n f = open(filename, 'wb')\n magic = numpy.array([202021.25], dtype=numpy.float32)\n (height, width) = flow.shape[0:2]\n w = numpy.array([width], dtype=numpy.int32)\n h = numpy.array([height], dtype=numpy.int32)\n magic.tofile(f)\n w.tofile(f)\n h.tofile(f)\n flow.tofile(f)\n f.close()",
"def write_flow(flow, filename):\n f = open(filename, 'wb')\n magic = np.array([202021.25], dtype=np.float32)\n (height, width) = flow.shape[0:2]\n w = np.array([width], dtype=np.int32)\n h = np.array([height], dtype=np.int32)\n magic.tofile(f)\n w.tofile(f)\n h.tofile(f)\n flow.tofile(f)\n f.close()",
"def write_annot(filepath, labels, ctab, names, fill_ctab=True):\n with open(filepath, \"wb\") as fobj:\n dt = _ANNOT_DT\n vnum = len(labels)\n\n def write(num, dtype=dt):\n np.array([num]).astype(dtype).tofile(fobj)\n\n def write_string(s):\n s = (s if isinstance(s, bytes) else s.encode()) + b'\\x00'\n write(len(s))\n write(s, dtype='|S%d' % len(s))\n\n # Generate annotation values for each ctab entry\n if fill_ctab:\n ctab = np.hstack((ctab[:, :4], _pack_rgb(ctab[:, :3])))\n elif not np.array_equal(ctab[:, [4]], _pack_rgb(ctab[:, :3])):\n warnings.warn('Annotation values in {} will be incorrect'.format(\n filepath))\n\n # vtxct\n write(vnum)\n\n # convert labels into coded CLUT values\n clut_labels = ctab[:, -1][labels]\n clut_labels[np.where(labels == -1)] = 0\n\n # vno, label\n data = np.vstack((np.array(range(vnum)),\n clut_labels)).T.astype(dt)\n data.tofile(fobj)\n\n # tag\n write(1)\n\n # ctabversion\n write(-2)\n\n # maxstruc\n write(np.max(labels) + 1)\n\n # File of LUT is unknown.\n write_string('NOFILE')\n\n # num_entries\n write(ctab.shape[0])\n\n for ind, (clu, name) in enumerate(zip(ctab, names)):\n write(ind)\n write_string(name)\n for val in clu[:-1]:\n write(val)",
"def save_annotated_image(self, file: Path) -> None:\n pass",
"def create_rhd_annotations(annotations_file,\n annotations_out_path,\n color_path,\n fingers='ALL',\n hands_to_annotate='BOTH',\n annotate_non_visible=True,\n force_new_files=False):\n with open(annotations_file, 'rb') as f:\n annotations = pickle.load(f)\n\n if force_new_files:\n remove_files_in_folder(annotations_out_path)\n\n print(f\"Creating annotations in directory: {color_path}\")\n print(f\"Using annotation file: {annotations_file}\")\n print(f\"And outputting to: {annotations_out_path}\")\n for fi in tqdm(os.listdir(color_path)):\n if fi.endswith('png'):\n anno_file_name = f\"{fi.split('.')[0]}.an\"\n anno_file_path = os.path.join(annotations_out_path, anno_file_name)\n ind = int(fi.split('.')[0])\n \n right_hand = get_right_hand(ind, annotations)\n left_hand = get_left_hand(ind, annotations)\n \n with open(anno_file_path, 'w') as write_file:\n if hands_to_annotate.lower() == 'right':\n hands = [right_hand]\n elif hands_to_annotate.lower() == 'left':\n hands = [left_hand]\n else:\n hands = [right_hand, left_hand]\n\n for h in hands:\n if fingers == 'ALL':\n for p in h:\n visible = p[2] != 0\n if visible or annotate_non_visible:\n write_file.write(f\"{float(p[0])},{float(p[1])}\\n\")\n else:\n for f in fingers:\n p = h[FINGER_MAP[f]]\n visible = p[2] != 0\n if visible or annotate_non_visible:\n write_file.write(f\"{float(p[0])},{float(p[1])}\\n\")",
"def write_to_file(writer, data):\n feature = {\n \"text\": _int64_feature(data)\n }\n tf_example = tf.train.Example(features=tf.train.Features(feature=feature))\n writer.write(tf_example.SerializeToString())",
"def write_to_file(writer, data):\n feature = {\n \"text\": _int64_feature(data)\n }\n tf_example = tf.train.Example(features=tf.train.Features(feature=feature))\n writer.write(tf_example.SerializeToString())",
"def writeAnnotationsToFile(self, annotatedRegions):\n\n\t\t#The writeToCsv does not seem to work somehow, what if we do this by hand? Can we then write to file?\n\t\twriteToCsvManual(sys.argv[2], annotatedRegions)\n\t\t\t\n\t\t#write the merged dictionary to csv, the order of the annotations and regions should column-wise be the same. \n\t\t#writeToCsv('test.csv', annotatedRegions, False)\t",
"def save_annotations(self):\n r = requests.get(\n f'{self.api_host}/v1/entity-annotations?'\n f'annotation_type=Source reliability (binary)&size=100',\n headers=self.get_request_headers()\n )\n\n entity_annotations = r.json().get('entity_annotations')\n\n for annotation in entity_annotations:\n annotation_id = annotation.get('entity_id')\n with open(\n f'{self.data_folder}/annotations/{annotation_id}.json',\n 'w'\n ) as f:\n json.dump(annotation, f)",
"def save_to_file(self, save_to, to_format, annotations, item=None):\n # what file format\n if self.save_to_format is None:\n if to_format.lower() in [\"dataloop\", \"coco\"]:\n self.save_to_format = 'json'\n elif to_format.lower() in ['yolo']:\n self.save_to_format = 'txt'\n else:\n self.save_to_format = 'xml'\n\n # save\n # JSON #\n if self.save_to_format == 'json':\n # save json\n save_to = save_to + '.json'\n with open(save_to, \"w\") as f:\n json.dump(annotations, f, indent=2)\n\n # TXT #\n elif self.save_to_format == 'txt':\n # save txt\n save_to = save_to + '.txt'\n with open(save_to, \"w\") as f:\n for ann in annotations:\n if ann is not None:\n f.write(' '.join([str(x) for x in ann]) + '\\n')\n\n # XML #\n elif self.save_to_format == 'xml':\n output_annotation = {\n 'path': item.filename,\n 'filename': os.path.basename(item.filename),\n 'folder': os.path.basename(os.path.dirname(item.filename)),\n 'width': item.width,\n 'height': item.height,\n 'depth': 3,\n 'database': 'Unknown',\n 'segmented': 0,\n 'objects': annotations\n }\n save_to = save_to + '.xml'\n environment = Environment(loader=PackageLoader('dtlpy', 'assets'),\n keep_trailing_newline=True)\n annotation_template = environment.get_template(self.xml_template_path)\n with open(save_to, 'w') as file:\n content = annotation_template.render(**output_annotation)\n file.write(content)\n else:\n raise exceptions.PlatformException('400', 'Unknown file format to save to')",
"def write(self, path):\n\n annotation = copy.deepcopy(self.annotation)\n\n for image_info in annotation['images']:\n image_info['file_name'] = os.path.relpath(image_info['file_name'],\n os.path.dirname(path))\n\n with open(path, 'w') as read_file:\n json.dump(annotation, read_file)",
"def save_annotation(self, drivename, fname, json_str):\n\t\tassert type(json_str) == str, \"json must be a string\"\n\t\tif not isdir(self.OUTPUT_ANN_DIR):\n\t\t\ttry: \n\t\t\t makedirs(self.OUTPUT_ANN_DIR)\n\t\t\texcept OSError: \n\t\t\t print (\"Creation of the directory {} failed\".format(self.OUTPUT_ANN_DIR))\n\n\t\toutput_drive_dir = join(self.OUTPUT_ANN_DIR, drivename)\n\t\tif not isdir(output_drive_dir):\n\t\t\ttry: \n\t\t\t makedirs(output_drive_dir)\n\t\t\texcept OSError: \n\t\t\t print (\"Creation of the directory {} failed\".format(output_drive_dir))\n\n\t\ttry:\n\t\t\tjson_object = json.loads(json_str)\n\t\texcept ValueError:\n\t\t\tprint(\"Annotation not a valid json\")\n\t\t\treturn 0\n\t\tfname = fname.split(\".\")[0] + \".json\"\n\t\tsave_filename = join(self.OUTPUT_ANN_DIR, drivename, fname)\n\t\twith open(save_filename, \"w\") as f:\n\t\t\tf.write(json_str)\n\t\t\treturn 1\n\t\treturn 0",
"def to_file(self, file_path, smirnoff_data):\n pass",
"def write(self, path: str, embeddings: Embeddings):\n if self == Format.finalfusion:\n embeddings.write(path)\n elif self == Format.word2vec:\n write_word2vec(path, embeddings)\n elif self == Format.text:\n write_text(path, embeddings)\n elif self == Format.textdims:\n write_text_dims(path, embeddings)\n elif self == Format.fasttext:\n write_fasttext(path, embeddings)\n else:\n raise ValueError(f\"Unknown format {str(self)}\")",
"def writeToFile(ruleset, className, classValue, fp):\n size = len(ruleset)\n if (size != 0):\n for i in range(size):\n j=0\n while(j<len(ruleset[i])-1):\n fp.write(str(ruleset[i][j]).replace(\"'\", \"\")+' & ')\n j = j+1\n fp.write(str(ruleset[i][j]).replace(\"'\", \"\")+\" -> (\"+className+\", \"+classValue+\") \\n\")\n fp.close()",
"def write_to_file_ann(self) -> None:\n with open(self.output_file_path, mode='w', newline='') as csv_file:\n tweet = ['id', 'created_time', 'text']\n writer = csv.DictWriter(csv_file, fieldnames=tweet)\n writer.writeheader()\n for tweet in self.unique_tweets:\n try:\n writer.writerow(tweet)\n except:\n pass\n print(\"Tweets written to a file\")",
"def write_graph(self, graph_def):\n write_name = self.params.model_name+\"_v\"+self.params.version+\".pb\"\n self.writer = tf.compat.v1.summary.FileWriter(self.params.save_dir, graph=self.graph)\n tf.io.write_graph(graph_def,\n logdir=self.params.save_dir, name=write_name, as_text=False)\n self.logger.log_info(\"Graph def saved in file %s\"%self.params.save_dir+write_name)",
"def writeArff(file_name, relation, classes, attrs, data):\n\tprint 'writeArff:', file_name, len(data), len(data[0])\n\tf = file(file_name, 'w')\n\tf.write('%\\n')\n\tf.write('%% %s \\n' % os.path.basename(file_name))\n\tf.write('%\\n')\n\tf.write('% Created by ' + os.path.basename(sys.argv[0]) + ' on ' + datetime.date.today().strftime(\"%A, %d %B %Y\") + '\\n')\n\tf.write('% Code at http://bit.ly/b7Kkqt\\n')\n\tf.write('%\\n')\n\tf.write('% Constructed from raw data in http://archive.ics.uci.edu/ml/machine-learning-databases/soybean/\\n')\n\tf.write('%% %d instances\\n' % len(data))\n\tf.write('%% %d attributes + 1 class = %d columns\\n' % (len(data[0]) - 1, len(data[0])))\n\tf.write('\\n')\n\tf.write('@RELATION ' + relation + '\\n\\n')\n\tf.write('@ATTRIBUTE %-15s {%s}\\n' % ('class', ','.join([x for x in classes if not x == '?'])))\n\tfor a in attrs:\n\t\tf.write('@ATTRIBUTE %-15s {%s}\\n' % (a['attr'], ','.join([x for x in a['vals'] if not x == '?'])))\n\tf.write('\\n@DATA\\n\\n')\n\tfor instance in data:\n\t\tf.write(', '.join(instance) + '\\n')\n\tf.close()\n\n\t\"\"\" Copy .arff files to .arff.txt so they can be viewed from Google docs \"\"\"\n\tprint 'writeArff:', file_name + '.txt', '-- duplicate'\n\tshutil.copyfile(file_name, file_name + '.txt')",
"def write_to_file(self, filename: str) -> None:",
"def write(self, fname):\n pass",
"def write(self, filename):\n pass"
] | [
"0.6668815",
"0.6605532",
"0.6497272",
"0.6436646",
"0.64314204",
"0.6285113",
"0.624797",
"0.624234",
"0.6178932",
"0.6084837",
"0.6082922",
"0.6038828",
"0.5981543",
"0.59467894",
"0.5830986",
"0.5830986",
"0.5819359",
"0.5800235",
"0.5796739",
"0.57627714",
"0.57070196",
"0.56658345",
"0.56436515",
"0.56323963",
"0.56203765",
"0.55376995",
"0.54746073",
"0.5471026",
"0.54641116",
"0.54514855"
] | 0.67075735 | 0 |