Dataset schema (per row):
  query            string, length 9 to 9.05k
  document         string, length 10 to 222k
  metadata         dict
  negatives        sequence of 30 strings
  negative_scores  sequence of 30 scores
  document_score   string, length 4 to 10
  document_rank    string, 2 classes
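The fields above form a standard text-retrieval layout: one natural-language query, one positive code document, and 30 scored negatives. Below is a minimal sketch of loading and inspecting a row; the repository identifier is a placeholder and the use of the Hugging Face datasets library is an assumption, not something stated by this dump.

from datasets import load_dataset

# Placeholder path -- substitute the actual dataset repository name.
ds = load_dataset("your-org/code-retrieval-triplets", split="train")

row = ds[0]
query = row["query"]             # natural-language description of the code
positive = row["document"]       # the matching code snippet
negatives = row["negatives"]     # 30 non-matching snippets
scores = row["negative_scores"]  # similarity score of each negative to the query

# Keep the five hardest negatives (highest-scoring non-matches).
hardest = [n for n, s in sorted(zip(negatives, scores),
                                key=lambda p: float(p[1]),
                                reverse=True)[:5]]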
Test that admin can delete a product
def test_admin_delete_product(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']
    product = dict(
        prod_name='NY_denims',
        category='denims',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
    self.assertEqual(resp.status_code, 201)

    resp = self.client.delete(
        '/api/v1/products/1',
        content_type='application/json',
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'Product deleted!')
    self.assertEqual(resp.status_code, 200)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
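The objective block above marks each row as a (query, document, negatives) triplet. A minimal sketch of expanding one row into training triplets follows; row is assumed to be a dict with the schema fields listed at the top of this dump, and the helper name is hypothetical.

def row_to_triplets(row, max_negatives=5):
    # Yield (anchor, positive, negative) text tuples for contrastive training.
    anchor = row["query"]
    positive = row["document"]
    for negative in row["negatives"][:max_negatives]:
        yield anchor, positive, negative

# Usage: triplets = list(row_to_triplets(row))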
[ "def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_delete_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/1/')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n response = self.client.get('/api/1.0/products/1/')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_delete_product(self):\n view = ProductDeleteView.as_view({'delete': 'destroy'})\n uri = reverse('products:delete-product', kwargs={'pk': self.product_id})\n request = self.factory.delete(uri, HTTP_AUTHORIZATION='Token {}'.format(self.token_admin.key))\n request.user = self.user['admin']\n response = view(request, pk=self.product_id)\n self.assertEqual(response.status_code, 204,\n f'Expected Response Code 204, received {response.status_code} instead.')", "def test_security_on_delete(self):\n # test the delete product url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.delete(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_03_product_delete(self):\n product = self.create_product()\n products = self.product_obj.search([])\n self.assertIn(product, products)\n product.unlink()\n self.assertNotIn(product.exists(), products)", "def test_get_deleted_product(self):\n product = self.add_product()\n product.is_deleted = True\n product.save()\n\n url = f'{self.url}{product.id}/'\n\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n self.client.force_authenticate(user=self.admin_user)\n response = self.client.get(url, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_product_delete(self):\n # first performe create\n id = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id:\n # then performe delete\n self._delete_model(\"product\", id)\n self.assertIsNotNone(id)", "def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n 
\n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_delete(self):\n self.assertEqual(Product.objects.count(), 2)\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.delete(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n self.assertEqual(response.status_code, 204)\n self.assertEqual(Product.objects.count(), 1)", "def test_delete_store_success(self):\n product = sample_product(supplier_id=self.user)\n url = detail_url(product.id)\n res = self.client.delete(url)\n products = Product.objects.all()\n\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(len(products), 0)", "def test_admin_cannot_delete_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_products_ref_users_delete(self):\n pass", "def test_delete_product(self):\n product_pk = 1\n product_count_before = models.Product.objects.count()\n url = reverse('products:detail', kwargs={'pk': product_pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(product_count_before - models.Product.objects.count(), 1)", "def test_post_delete_admin(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n self.client.force_authenticate(user=self.superuser)\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def test_admin_cannot_delete_product_with_non_integer_prod_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/kk',\n 
content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The product id should be a number!')\n self.assertEqual(resp.status_code, 400)", "def test_delete_permission(self):\r\n self.assertFalse(self.creator_admin.has_delete_permission(self.request))", "def test_delete(self):\n pass", "def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_products_ref_users_user_delete(self):\n pass", "def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_delete_item_using_delete(self):\n pass", "def test_delete_product_non_valid_pk(self):\n product_pk = 9999\n product_count_before = models.Product.objects.count()\n url = reverse('products:detail', kwargs={'pk': product_pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_delete(self):\n self.client.force_authenticate(user=self.admin)\n\n response = self.client.delete(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )", "def test_product_remove(self):\n\n flag = \"user\"\n api = \"product.product.remove\"\n current_page = 1\n search_info = json.dumps({\n 'id': 12,\n })\n print('start------------------------>remove')\n result = self.access_api(flag = flag, api = api, current_page = current_page, product_info = search_info)", "def test_delete_of_an_escalated_article_with_admin(self):\n token = self.user3.token()\n self.client.credentials(\n HTTP_AUTHORIZATION='Bearer ' + token)\n resp = self.delete_article()\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertEqual(resp.data[\"message\"], self.admin_delete)", "def test_delete_admin_from_org(self):\n pass", "def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)", "def deleteProduct(request,productId):\n deleteObj = Collection()\n deleteObj.id=productId\n 
productBll.deleteProducts(deleteObj)\n return HttpResponseRedirect('/admin/product/list/')", "def delete_products(request):\n product_obj = Products.objects.get(id=request.data[\"id\"])\n if request.user == product_obj.shop_rel.user:\n product_obj.delete()\n return Response(status=status.HTTP_200_OK)\n\n return Response(status=status.HTTP_401_UNAUTHORIZED)" ]
[ "0.83574384", "0.83247185", "0.8285091", "0.82623804", "0.8177644", "0.8141131", "0.80846477", "0.80198777", "0.8002211", "0.7932145", "0.7909494", "0.7733621", "0.7691911", "0.7655855", "0.756794", "0.7516048", "0.75010175", "0.7476865", "0.74705225", "0.7435304", "0.73770976", "0.7331892", "0.7221541", "0.7064374", "0.702435", "0.7012553", "0.6978083", "0.6963353", "0.69624376", "0.6948373" ]
0.8554349
0
Test that a non-admin cannot delete a product
def test_non_admin_cannot_delete_product(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']
    product = dict(
        prod_name='NY_denims',
        category='denims',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
    self.assertEqual(resp.status_code, 201)

    resp = self.admin_create_user()
    reply = self.attendant_login()
    token = reply['token']
    resp = self.client.delete(
        '/api/v1/products/1',
        content_type='application/json',
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'Unauthorized Access!')
    self.assertEqual(resp.status_code, 401)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_security_on_delete(self):\n # test the delete product url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.delete(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_admin_cannot_delete_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def 
test_admin_cannot_delete_product_with_non_integer_prod_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/kk',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The product id should be a number!')\n self.assertEqual(resp.status_code, 400)", "def test_03_product_delete(self):\n product = self.create_product()\n products = self.product_obj.search([])\n self.assertIn(product, products)\n product.unlink()\n self.assertNotIn(product.exists(), products)", "def test_delete_permission(self):\r\n self.assertFalse(self.creator_admin.has_delete_permission(self.request))", "def test_get_deleted_product(self):\n product = self.add_product()\n product.is_deleted = True\n product.save()\n\n url = f'{self.url}{product.id}/'\n\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n self.client.force_authenticate(user=self.admin_user)\n response = self.client.get(url, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)", "def test_delete_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/1/')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n response = self.client.get('/api/1.0/products/1/')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_admin_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Product deleted!')\n self.assertEqual(resp.status_code, 200)", "def test_delete_product(self):\n view = 
ProductDeleteView.as_view({'delete': 'destroy'})\n uri = reverse('products:delete-product', kwargs={'pk': self.product_id})\n request = self.factory.delete(uri, HTTP_AUTHORIZATION='Token {}'.format(self.token_admin.key))\n request.user = self.user['admin']\n response = view(request, pk=self.product_id)\n self.assertEqual(response.status_code, 204,\n f'Expected Response Code 204, received {response.status_code} instead.')", "def test_products_ref_users_delete(self):\n pass", "def test_post_delete_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_not_logged_cannot_delete(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_products_ref_users_user_delete(self):\n pass", "def test_none_admin_delete(self):\n\n with self.client:\n token = self.customer()\n id = 1\n response = self.client.delete('api/v1/meals/{}'.format(id),\n headers=({\"token\": token}))\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Customer is not authorized to access this page\")\n self.assertEqual(response.status_code, 401)", "def test_user_delete_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n del_procedure = self.client.delete(url)\n\n self.assertEqual(del_procedure.status_code,\n status.HTTP_403_FORBIDDEN)", "def test_delete_store_success(self):\n product = sample_product(supplier_id=self.user)\n url = detail_url(product.id)\n res = self.client.delete(url)\n products = Product.objects.all()\n\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(len(products), 0)", "def test_cannot_delete_usage(self):\n p = Permission.objects.get(name='Can delete usage')\n self.user.user_permissions.add(p)\n self.client.login(username='testuser', password='q2w3E$R%')\n response = self.client.delete(reverse('api_v1:usage-detail', kwargs={'pk': 1}),\n follow=True)\n self.assertEqual(response.status_code, 405)\n self.assertIn('not allowed', str(response.content))", "def test_handle_delete_not_admin(self):\n team = Team(\"BRS\", \"brs\", \"web\")\n test_user = User(\"userid\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n self.assertTupleEqual(self.testcommand.handle(\"team delete brs\", user),\n (self.testcommand.permission_error, 200))\n self.db.delete.assert_not_called()\n self.gh.org_delete_team.assert_not_called()", "def test_delete(self):\n self.assertEqual(Product.objects.count(), 2)\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.delete(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n self.assertEqual(response.status_code, 204)\n self.assertEqual(Product.objects.count(), 1)", "def test_delete__forbidden(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with test_app.test_request_context(self.request_path):\n with self.assertRaises(werkzeug.exceptions.Forbidden):\n self.handler.do_delete(account_id=self.appuser_id)\n\n unrevised_appuser = user_models.AppUser.get_by_id(self.appuser_id)\n self.assertEqual('[email protected]', unrevised_appuser.email)", "def test_no_delete_permission(client):\n user = user_with_permissions(\"polls.view_poll\")\n poll = Poll.objects.create(owner=user, 
text=\"question\")\n\n url = reverse(\"admin:polls_poll_change\", args=(poll.pk,))\n delete_url = reverse(\"admin:polls_poll_delete\", args=(poll.pk,))\n client.force_login(user)\n\n response = client.get(url)\n assert delete_url not in response.content.decode()", "def testDeleteAccessDenied(self):\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.runDelete(None, sequencer=self.hiseq2000.sodar_uuid)\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.response_401()\n for user in (self.guest, self.norole, self.unrelated_owner):\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.runDelete(user, sequencer=self.hiseq2000.sodar_uuid)\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.response_403()", "def test_delete_product_non_valid_pk(self):\n product_pk = 9999\n product_count_before = models.Product.objects.count()\n url = reverse('products:detail', kwargs={'pk': product_pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_delete__forbidden(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with register.app.test_request_context(self.request_path):\n with self.assertRaises(werkzeug.exceptions.Forbidden):\n self.handler.do_delete(self.feature_id)\n\n revised_feature = models.Feature.get_by_id(self.feature_id)\n self.assertFalse(revised_feature.deleted)", "def test_categories_product_admin(self):\n response = self.client.post('api/v1/category/products',\n data=json.dumps(category_product[0]),\n content_type='application/json',\n headers=self.admin_headers)\n self.assertEqual(response.status_code, 401)\n self.assertIn('unauthorized', str(response.data))" ]
[ "0.8456681", "0.82070416", "0.8153313", "0.772525", "0.750598", "0.75052166", "0.7497598", "0.74397874", "0.73591393", "0.7334757", "0.7330903", "0.72217363", "0.72114277", "0.71953607", "0.7125004", "0.70727336", "0.70629984", "0.6935391", "0.69298255", "0.6893835", "0.6886094", "0.6867642", "0.6865682", "0.6857384", "0.6854631", "0.6844916", "0.684339", "0.6829421", "0.68289196", "0.6807431" ]
0.834971
1
Test that admin cannot delete a product from empty Inventory
def test_admin_cannot_delete_product_from_empty_Inventory(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']

    resp = self.client.delete(
        '/api/v1/products/1',
        content_type='application/json',
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'There are no products in Inventory!')
    self.assertEqual(resp.status_code, 404)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_admin_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Product deleted!')\n self.assertEqual(resp.status_code, 200)", "def test_delete_inventory(self):\n # save the current number of inventories for later comparision\n inventory_count = self.get_inventory_count()\n # delete a inventory\n resp = self.app.delete('/inventories/1', content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(len(resp.data), 0)\n new_count = self.get_inventory_count()\n self.assertEqual(new_count, inventory_count - 1)", "def test_03_product_delete(self):\n product = self.create_product()\n products = self.product_obj.search([])\n self.assertIn(product, products)\n product.unlink()\n self.assertNotIn(product.exists(), products)", "def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def 
test_view_products_from_empty_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'There are no products yet!')\n self.assertEqual(resp.status_code, 404)", "def test_vault_delete_vault_item(self):\n pass", "def test_deletehardwares_item(self):\n pass", "def test_admin_cannot_delete_product_with_non_integer_prod_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/kk',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The product id should be a number!')\n self.assertEqual(resp.status_code, 400)", "def test_remove_all(self): #SAUCE-LAB-8\n login = LoginPage(self.driver)\n login.open()\n inventory_page = login.login(_DEF_USER, _DEF_PASSWORD)\n first_item = inventory_page.products\n first_item: InventoryItem\n for item in first_item:\n item.add_to_cart()\n if inventory_page.header.get_total_cart_items() == 6:\n print('\\n')\n print(f'Total of products {inventory_page.header.get_total_cart_items()}')\n else:\n print('Not all items were added')\n for item in first_item:\n item.remove_from_cart()\n if inventory_page.header.get_total_cart_items() == 0:\n print('\\n')\n print(f'Total of products {inventory_page.header.get_total_cart_items()}')\n else:\n print('Not all items were removed')", "def test_admin_cannot_delete_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_delete_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/1/')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n response = self.client.get('/api/1.0/products/1/')\n self.assertEqual(response.status_code, 
status.HTTP_404_NOT_FOUND)", "def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)", "def test_security_on_delete(self):\n # test the delete product url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.delete(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_delete_item_using_delete(self):\n pass", "def test_delete_store_success(self):\n product = sample_product(supplier_id=self.user)\n url = detail_url(product.id)\n res = self.client.delete(url)\n products = Product.objects.all()\n\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(len(products), 0)", "def check_and_delete(self, inventory): # used in a transaction block only so dont initiate a transaction here\n try:\n lines = inventory.lines\n for i in lines:\n if i.quantity == 0:\n i.delete((i,))\n # inventory.reload()\n inventory.save()\n chk = inventory.lines\n if len(chk) == 0:\n inventory.state = 'cancel'\n inventory.save()\n inventory.delete((inventory,))\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def test_delete(self):\n self.assertEqual(Product.objects.count(), 2)\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.delete(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n self.assertEqual(response.status_code, 204)\n self.assertEqual(Product.objects.count(), 1)", "def test_product_delete(self):\n # first performe create\n id = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id:\n # then performe delete\n self._delete_model(\"product\", id)\n self.assertIsNotNone(id)", "def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_update_inventory(self):\n pass", "def test_delete_product(self):\n view = ProductDeleteView.as_view({'delete': 'destroy'})\n uri = reverse('products:delete-product', kwargs={'pk': self.product_id})\n request = self.factory.delete(uri, HTTP_AUTHORIZATION='Token {}'.format(self.token_admin.key))\n request.user = self.user['admin']\n response = view(request, pk=self.product_id)\n 
self.assertEqual(response.status_code, 204,\n f'Expected Response Code 204, received {response.status_code} instead.')", "def test_delete_unexisting_product(self):\n response=self.delete_unexisting_products()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(result['Error'],'Product Not found')\n self.assertEqual(response.status_code, 400)", "def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n self.assertEqual(resp.status_code, 400)", "def test_add_and_remove_two_items(self):\n login = LoginPage(self.driver) #SAUCE-LAB-5\n login.open()\n inventory_page = login.login(_DEF_USER, _DEF_PASSWORD)\n first_item = inventory_page.products[0]\n first_item: InventoryItem\n first_item.add_to_cart()\n print('\\n')\n print(first_item.get_title())\n print(first_item.get_description())\n print(first_item.get_price())\n print('*' * 80)\n second_item = inventory_page.products[4]\n second_item: InventoryItem\n second_item.add_to_cart()\n print('\\n')\n print(second_item.get_title())\n print(second_item.get_description())\n print(second_item.get_price())\n print('*' * 80)\n first_item.remove_from_cart()\n second_item.remove_from_cart()\n print(f'Products {first_item.get_title()} and {second_item.get_title()} were successfully removed')", "def test_admin_cannot_create_product_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='',\n category='',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter all fields!')\n self.assertEqual(resp.status_code, 400)", "def test_delete_cart_item_unauthorized(self):\n user_id = '111'\n cart_id = self.cart_item_manager.create_cart(user_id, 'test cart', False)\n item_id1 = self.cart_item_manager.add_cart_item(self.catalog, user_id, cart_id, '1', 'entity_type',\n 'entity_version')\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.delete_cart_item('112', cart_id, item_id1)", "def test_delete_product(self):\n product_pk = 1\n product_count_before = models.Product.objects.count()\n url = reverse('products:detail', kwargs={'pk': product_pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(product_count_before - models.Product.objects.count(), 1)", "def test_delete_nveto_pmt_item(self):\n pass" ]
[ "0.79532677", "0.7735564", "0.7528131", "0.7521453", "0.7518936", "0.746801", "0.7354772", "0.72263944", "0.71801", "0.71784055", "0.7171436", "0.7112214", "0.7106493", "0.70771635", "0.6879483", "0.68655676", "0.68639493", "0.68453324", "0.6812393", "0.6777014", "0.67697865", "0.6754222", "0.6747571", "0.67225647", "0.66775954", "0.6598845", "0.6592748", "0.65878624", "0.65853804", "0.6577626" ]
0.8764794
0
Test that admin cannot delete a nonexistent product
def test_admin_cannot_delete_nonexistant_product(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']
    product = dict(
        prod_name='NY_denims',
        category='denims',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
    self.assertEqual(resp.status_code, 201)

    resp = self.client.delete(
        '/api/v1/products/2',
        content_type='application/json',
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'This product does not exist in Inventory!')
    self.assertEqual(resp.status_code, 404)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_03_product_delete(self):\n product = self.create_product()\n products = self.product_obj.search([])\n self.assertIn(product, products)\n product.unlink()\n self.assertNotIn(product.exists(), products)", "def test_security_on_delete(self):\n # test the delete product url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.delete(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_delete_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/1/')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n response = self.client.get('/api/1.0/products/1/')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_delete_store_success(self):\n product = sample_product(supplier_id=self.user)\n url = detail_url(product.id)\n res = self.client.delete(url)\n products = Product.objects.all()\n\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(len(products), 0)", "def test_admin_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Product deleted!')\n self.assertEqual(resp.status_code, 200)", "def 
test_products_ref_users_delete(self):\n pass", "def test_admin_cannot_delete_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_admin_cannot_delete_product_with_non_integer_prod_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/kk',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The product id should be a number!')\n self.assertEqual(resp.status_code, 400)", "def test_delete(self):\n self.assertEqual(Product.objects.count(), 2)\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.delete(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n self.assertEqual(response.status_code, 204)\n self.assertEqual(Product.objects.count(), 1)", "def test_delete_product_non_valid_pk(self):\n product_pk = 9999\n product_count_before = models.Product.objects.count()\n url = reverse('products:detail', kwargs={'pk': product_pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_delete_product(self):\n product_pk = 1\n product_count_before = models.Product.objects.count()\n url = reverse('products:detail', kwargs={'pk': product_pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(product_count_before - models.Product.objects.count(), 1)", "def test_get_deleted_product(self):\n product = self.add_product()\n product.is_deleted = True\n product.save()\n\n url = f'{self.url}{product.id}/'\n\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n self.client.force_authenticate(user=self.admin_user)\n response = self.client.get(url, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def 
test_delete_product(self):\n view = ProductDeleteView.as_view({'delete': 'destroy'})\n uri = reverse('products:delete-product', kwargs={'pk': self.product_id})\n request = self.factory.delete(uri, HTTP_AUTHORIZATION='Token {}'.format(self.token_admin.key))\n request.user = self.user['admin']\n response = view(request, pk=self.product_id)\n self.assertEqual(response.status_code, 204,\n f'Expected Response Code 204, received {response.status_code} instead.')", "def test_product_delete(self):\n # first performe create\n id = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id:\n # then performe delete\n self._delete_model(\"product\", id)\n self.assertIsNotNone(id)", "def test_delete_unexisting_product(self):\n response=self.delete_unexisting_products()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(result['Error'],'Product Not found')\n self.assertEqual(response.status_code, 400)", "def test_delete(self):\n pass", "def test_products_ref_users_user_delete(self):\n pass", "def test_delete_item_using_delete(self):\n pass", "def test_none_admin_delete(self):\n\n with self.client:\n token = self.customer()\n id = 1\n response = self.client.delete('api/v1/meals/{}'.format(id),\n headers=({\"token\": token}))\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Customer is not authorized to access this page\")\n self.assertEqual(response.status_code, 401)", "def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)", "def test_deletehardwares_item(self):\n pass", "def test_delete_permission(self):\r\n self.assertFalse(self.creator_admin.has_delete_permission(self.request))", "def test_user_delete_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n del_procedure = self.client.delete(url)\n\n self.assertEqual(del_procedure.status_code,\n status.HTTP_403_FORBIDDEN)", "def test_delete_run(self):\n pass", "def test_delete_nonexist(self):\n promotion = PromotionFactory()\n promotion.id = '1cak41-nonexist'\n try:\n promotion.delete()\n except KeyError:\n self.assertRaises(KeyError)", "def test_delete_admin_from_org(self):\n pass", "def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n 
self.assertEqual(resp.status_code, 404)" ]
[ "0.8434453", "0.81416374", "0.8129146", "0.79895574", "0.7884565", "0.7712441", "0.7642063", "0.75856626", "0.7577927", "0.757099", "0.7507489", "0.75073075", "0.75018024", "0.7473333", "0.7418871", "0.7409584", "0.72817427", "0.7275699", "0.72678584", "0.7262097", "0.7227959", "0.71377033", "0.71309066", "0.71301854", "0.71134305", "0.7075899", "0.70369524", "0.70226353", "0.70187646", "0.7013848" ]
0.8506851
0
Test ComBat feature harmonization.
def test_combat():
    # Check if example data directory exists
    example_data_dir = th.find_exampledatadir()

    # Check if example data required exists
    features = glob.glob(os.path.join(example_data_dir, 'examplefeatures_Patient*.hdf5'))
    if len(features) < 7:
        message = 'Too few example features for ComBat testing not found! ' +\
                  'Run the create_example_data script from the WORC exampledata ' +\
                  'directory!'
        raise WORCValueError(message)
    elif len(features) > 7:
        message = 'Too many example features for ComBat testing not found! ' +\
                  'Run the create_example_data script from the WORC exampledata ' +\
                  'directory!'
        raise WORCValueError(message)

    objectlabels = os.path.join(example_data_dir, 'objectlabels.csv')

    # Python
    config = os.path.join(example_data_dir, 'ComBatConfig_python.ini')
    features_train_out = [f.replace('examplefeatures_', 'examplefeatures_ComBat_python_') for f in features]

    # First run synthetic test
    # Synthetictest()

    # # Run the Combat function: only for training
    # ComBat(features_train_in=features,
    #        labels_train=objectlabels,
    #        config=config,
    #        features_train_out=features_train_out)

    # # Run the Combat function: now for train + testing
    ComBat(features_train_in=features[0:4],
           labels_train=objectlabels,
           config=config,
           features_train_out=features_train_out[0:4],
           features_test_in=features[4:],
           labels_test=objectlabels,
           features_test_out=features_train_out[4:])

    # # Matlab
    # config = os.path.join(example_data_dir, 'ComBatConfig_matlab.ini')
    # features_train_out = [f.replace('examplefeatures_', 'examplefeatures_ComBat_matlab_') for f in features]
    #
    # # Run the Combat function: only for training
    # ComBat(features_train_in=features,
    #        labels_train=objectlabels,
    #        config=config,
    #        features_train_out=features_train_out)
    #
    # # Run the Combat function: now for train + testing
    # ComBat(features_train_in=features[0:4],
    #        labels_train=objectlabels,
    #        config=config,
    #        features_train_out=features_train_out[0:4],
    #        features_test_in=features[4:],
    #        labels_test=objectlabels,
    #        features_test_out=features_train_out[4:])

    # Remove the feature files
    # for i in glob.glob(os.path.join(example_data_dir, '*features_ComBat*.hdf5')):
    #     os.remove(i)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_feature(feature, value, good_features):\r\n\tbase_write(good_features,\"bin/stanford-ner-2015-04-20/base.prop\")\r\n\tbase_prop = open(\"bin/stanford-ner-2015-04-20/base.prop\", \"a\")\r\n\tbase_prop.write(feature.strip() + \"=\" + str(value) + \"\\n\")\r\n\tbase_prop.close()\r\n\r\n\t#Test read base.prop - To display in console\r\n\tread = open(\"bin/stanford-ner-2015-04-20/base.prop\").read()\r\n\tlogging.warning(read)\r\n\r\n\tos.system(\"bash src/other/features/features_selection.sh\")", "def test_category_and_its_feature(self):\n class RunnerBlah(Runner):\n def __init__(self, renv):\n super(RunnerBlah, self).__init__(renv)\n self.register_feature_class('bravo', Feature)\n self.register_feature_class('charlie', Feature)\n self.register_feature_category_class(\n 'alpha', features=['bravo', 'charlie'], mono=True)\n\n renv = create_runtime(RunnerBlah)\n renv.create_runner('runner')\n\n ctrl = renv.feature_ctrl\n\n total_order, _ = ctrl.get_activation_order(['alpha', 'bravo'])\n self.assertEqual(['bravo'], total_order)", "def test_workbench_scenarios(self):\n result_title = 'Adaptive Numeric Input XBlock'\n basic_scenario = \"<adaptivenumericinput />\"\n test_result = self.xblock.workbench_scenarios()\n self.assertEquals(result_title, test_result[0][0])\n self.assertIn(basic_scenario, test_result[0][1])", "def test_split_feature(tree):\r\n print(\"test_split_feature()...\", end = \"\")\r\n assert (tree.process_split_feature() == True)\r\n print(\"Passed!\")", "def run_feature_extraction_tests():\n test_feature_extraction()\n test_distributed_feature_extraction()\n test_multimodel_feature_extraction()\n test_distributed_multimodel_feature_extraction()", "def test_category_and_its_feature_dep(self):\n class RunnerBlah(Runner):\n def __init__(self, renv):\n super(RunnerBlah, self).__init__(renv)\n self.register_feature_class('bravo', Feature)\n self.register_feature_category_class(\n 'alpha', features=['bravo'], defaults=['bravo'])\n self.register_feature_class(\n 'foxtrot', Feature, requires=['alpha', 'bravo'])\n self.register_feature_category_class('echo', features=['foxtrot'])\n\n renv = create_runtime(RunnerBlah)\n renv.create_runner('runner')\n\n ctrl = renv.feature_ctrl\n\n total_order, _ = ctrl.get_activation_order(['foxtrot'])\n self.assertEqual(['bravo', 'foxtrot'], total_order)", "def feature():\n pass", "def test_combat_fastr():\n # Check if example data directory exists\n example_data_dir = th.find_exampledatadir()\n\n # Check if example data required exists\n features = glob.glob(os.path.join(example_data_dir, 'examplefeatures_Patient*.hdf5'))\n if len(features) < 6:\n message = 'Too few example features for ComBat testing not found!' +\\\n 'Run the create_example_data script from the WORC exampledata ' +\\\n 'directory!'\n raise WORCValueError(message)\n elif len(features) > 6:\n message = 'Too many example features for ComBat testing not found!' 
+\\\n 'Run the create_example_data script from the WORC exampledata ' +\\\n 'directory!'\n raise WORCValueError(message)\n\n objectlabels = os.path.join(example_data_dir, 'objectlabels.csv')\n\n # Python\n config = os.path.join(example_data_dir, 'ComBatConfig_python.ini')\n\n # Create the fastr network\n experiment = fastr.create_network('test_ComBat')\n\n source_features = experiment.create_source('HDF5', id='features_in', node_group='features')\n source_labels = experiment.create_source('PatientInfoFile', id='labels', node_group='pctrain')\n source_config = experiment.create_source('ParameterFile', id='config', node_group='conf')\n\n sink_features = experiment.create_sink('HDF5', id='features_out')\n\n node_combat = experiment.create_node('combat/ComBat:1.0',\n tool_version='1.0',\n id='ComBat',)\n\n link_combat_1 = experiment.create_link(source_config.output, node_combat.inputs['config'])\n link_combat_2 = experiment.create_link(source_labels.output, node_combat.inputs['patientclass_train'])\n link_combat_1.collapse = 'conf'\n link_combat_2.collapse = 'pctrain'\n\n # Mimic using two feature toolboxes\n links_Combat1_train = node_combat.inputs['features_train']['MR_0'] << source_features.output\n links_Combat1_train.collapse = 'features'\n\n links_Combat2_train = node_combat.inputs['features_train']['MR_1'] << source_features.output\n links_Combat2_train.collapse = 'features'\n\n links_Combat_out_train = sink_features.input << node_combat.outputs['features_train_out']\n links_Combat_out_train.collapse = 'ComBat'\n\n # Provide source and sink data\n source_data = dict()\n source_data['features_in'] = features\n source_data['labels'] = objectlabels\n source_data['config'] = config\n\n sink_data = dict()\n sink_data['features_out'] = \"vfs://output/test_ComBat/ComBat/features_ComBat_{{sample_id}}_{{cardinality}}{{ext}}\"\n\n # Execute\n experiment.execute(source_data, sink_data, execution_plugin='LinearExecution')\n\n # Remove the feature files\n for i in glob.glob(os.path.join(example_data_dir, '*features_ComBat*.hdf5')):\n os.remove(i)", "def test_theft_and_stealing(self):", "def test_predictor():", "def feature(self):\n Feature(run=default_frame, flags=TE)\n Feature(run=load(\"window_functions.tests.rows_frame\", \"feature\"), flags=TE)\n Feature(run=load(\"window_functions.tests.range_frame\", \"feature\"), flags=TE)\n Feature(run=load(\"window_functions.tests.range_overflow\", \"feature\"), flags=TE)\n Feature(run=load(\"window_functions.tests.range_datetime\", \"feature\"), flags=TE)\n Feature(run=load(\"window_functions.tests.range_errors\", \"feature\"), flags=TE)", "def test_machine_learning():", "def feat():\n pass", "def test_features(self):\n assert list(parser.generate_commands(yaml.load(\n '- my_command: {name: my_name}'))) == [('my_command', {'name': 'my_name'})]", "def test_text_classifier_vaporise(self):\n pass", "def test_01_lighting(self):", "def testBeliefs1sk(self):", "def ConstrTest():\n with open(path.join(MAIN_PATH, TEST)) as f:\n for line in f:\n line = line.strip().split(\"\\t\")\n src, dest = line[1:]\n features = Features(src, dest)\n test_instances.append(features)", "def test_active_inference_SPM_1b(self):", "def test_build_feature_base(self):\n data = pd.DataFrame(pd.read_csv(\"tests/in_data/pro1_sub.csv\"))\n\n X = data.ix[:,1]\n Y = data.ix[:,0]\n model_sample = Model([],\"presence\")\n\n feature_base = model_sample.build_feature_base(X,Y)\n feature_evaluation =\n assert_equal(len(feature_base) > 10, True)", "def test_training(self):\n\t\tpass", "def 
feature(self, node=\"clickhouse1\"):\n self.context.node = self.context.cluster.node(node)\n\n for scenario in loads(current_module(), Scenario):\n scenario()", "def test_series_in_features(self):\n assert parse_command({'test{{A,B}}': {'depends_on': 'name{{A,B}}'}}) == [\n ('testA', {'depends_on': 'nameA'}), ('testB', {'depends_on': 'nameB'})]", "def cli(argv):\r\n argv.append(\"--exhaust-materials\")\r\n cltestbench.cli(argv)", "def main():\n parser = argparse.ArgumentParser(\n description=\"making feature file argsurations.\")\n\n parser.add_argument(\n \"--waveforms\", default=None,\n help=\"directory or list of filename of input wavfile\")\n parser.add_argument(\n \"--hdf5dir\", default=None,\n help=\"directory to save hdf5\")\n parser.add_argument(\n \"--wavdir\", default=None,\n help=\"directory to save of preprocessed wav file\")\n parser.add_argument(\n \"--fs\", default=16000,\n type=int, help=\"Sampling frequency\")\n parser.add_argument(\n \"--shiftms\", default=5,\n type=float, help=\"Frame shift in msec\")\n parser.add_argument(\n \"--feature_type\", default=\"world\", choices=[\"world\", \"melspc\", \"mcep\"],\n type=str, help=\"feature type\")\n parser.add_argument(\n \"--mspc_dim\", default=80,\n type=int, help=\"Dimension of mel spectrogram\")\n parser.add_argument(\n \"--minf0\", default=40,\n type=int, help=\"minimum f0 for world analysis\")\n parser.add_argument(\n \"--maxf0\", default=400,\n type=int, help=\"maximum f0 for world analysis\")\n parser.add_argument(\n \"--fmin\", default=None, nargs=\"?\",\n type=int, help=\"minimum frequency for melspc\")\n parser.add_argument(\n \"--fmax\", default=None, nargs=\"?\",\n type=int, help=\"maximum frequency for melspc\")\n parser.add_argument(\n \"--mcep_dim\", default=24,\n type=int, help=\"Dimension of mel cepstrum\")\n parser.add_argument(\n \"--mcep_alpha\", default=0.41,\n type=float, help=\"Alpha of mel cepstrum\")\n parser.add_argument(\n \"--fftl\", default=1024,\n type=int, help=\"FFT length\")\n parser.add_argument(\n \"--highpass_cutoff\", default=70,\n type=int, help=\"Cut off frequency in lowpass filter\")\n parser.add_argument(\n \"--save_wav\", default=True,\n type=strtobool, help=\"Whether to save filtered wav file\")\n parser.add_argument(\n \"--n_jobs\", default=10,\n type=int, help=\"number of parallel jobs\")\n parser.add_argument(\n \"--verbose\", default=1,\n type=int, help=\"log message level\")\n\n args = parser.parse_args()\n\n # set log level\n if args.verbose == 1:\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S')\n elif args.verbose > 1:\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S')\n else:\n logging.basicConfig(level=logging.WARNING,\n format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S')\n logging.warning(\"logging is disabled.\")\n\n # show arguments\n for key, value in vars(args).items():\n logging.info(\"%s = %s\" % (key, str(value)))\n\n # read list\n if os.path.isdir(args.waveforms):\n file_list = sorted(find_files(args.waveforms, \"*.wav\"))\n else:\n file_list = read_txt(args.waveforms)\n logging.info(\"number of utterances = %d\" % len(file_list))\n\n # check directory existence\n if not os.path.exists(args.wavdir) and args.highpass_cutoff != 0 and args.save_wav:\n os.makedirs(args.wavdir)\n if not os.path.exists(args.hdf5dir):\n 
os.makedirs(args.hdf5dir)\n\n # divide list\n file_lists = np.array_split(file_list, args.n_jobs)\n file_lists = [f_list.tolist() for f_list in file_lists]\n\n # multi processing\n processes = []\n if args.feature_type == \"world\":\n target_fn = world_feature_extract\n elif args.feature_type == \"melspc\":\n target_fn = melspectrogram_extract\n else:\n target_fn = melcepstrum_extract\n for f in file_lists:\n p = mp.Process(target=target_fn, args=(f, args,))\n p.start()\n processes.append(p)\n\n # wait for all process\n for p in processes:\n p.join()", "def test_add_feature(self):\n fc1 = self.read_feature()\n fc2 = self.read_feature('Aegean_Sea')\n\n # add a feature already in the feature collection\n fc1.add_feature(fc1.features[0])\n assert len(fc1.features) == 1\n\n # add a new feature to the feature collection\n fc1.add_feature(fc2.features[0])\n assert len(fc1.features) == 2\n\n self.check_feature(fc1.features[0])\n self.check_feature(fc1.features[1], expected_name='Aegean Sea')", "def test_all_features_with_data(self):\n feature1 = Feature('looktest1')\n feature1.set_percentage(5)\n\n feature2 = Feature('looktest2')\n feature2.activate()\n feature2.add_to_whitelist(3)\n\n feature3 = Feature('looktest3')\n feature3.activate()\n feature3.add_to_blacklist(4)\n feature3.add_to_blacklist(5)\n\n feature4 = Feature('looktest4')\n feature4.activate()\n feature4.add_to_whitelist(3)\n feature4.add_to_whitelist(5)\n feature4.add_to_blacklist(4)\n\n all_features = Feature.all_features(include_data=True)\n self.assertEqual(len(all_features), 4)\n\n for key in ['looktest1', 'looktest2', 'looktest3', 'looktest4']:\n self.assertTrue(key in all_features)\n if not key == 'looktest1':\n self.assertEqual(all_features[key]['percentage'], 100)\n\n self.assertEqual(all_features['looktest1']['percentage'], 5)\n self.assertFalse('whitelist' in all_features['looktest1'])\n self.assertFalse('blacklist' in all_features['looktest1'])\n\n self.assertTrue('whitelist' in all_features['looktest2'])\n self.assertEqual(all_features['looktest2']['whitelist'], [3])\n self.assertFalse('blacklist' in all_features['looktest2'])\n\n self.assertFalse('whitelist' in all_features['looktest3'])\n self.assertTrue('blacklist' in all_features['looktest3'])\n self.assertEqual(all_features['looktest3']['blacklist'], [4, 5])\n\n self.assertTrue('whitelist' in all_features['looktest4'])\n self.assertEqual(all_features['looktest4']['whitelist'], [3, 5])\n self.assertTrue('blacklist' in all_features['looktest4'])\n self.assertEqual(all_features['looktest4']['blacklist'], [4])", "def test_classify_cuisine(self):\n pass", "def test_T01():", "def setUp(self):\n\n self.niceArgV = (\"--long Alpha -n Beta \"\n \"--shortless Gamma -f --myflag \"\n \"--myparam Tofu\").split()\n\n self.nice = WellBehaved()" ]
[ "0.652699", "0.64427763", "0.6373243", "0.6333925", "0.6331873", "0.6215436", "0.6211196", "0.59231627", "0.59107846", "0.5880464", "0.5870288", "0.58623254", "0.58284056", "0.58262885", "0.58067703", "0.5803254", "0.57745475", "0.5774168", "0.57514584", "0.5704505", "0.5700603", "0.5693078", "0.5652558", "0.56517303", "0.5650566", "0.5643128", "0.56402546", "0.56365436", "0.56352496", "0.56278056" ]
0.6563471
0
Returns true for all hostclasses which aren't tagged as nonZDD hostclasses
def is_deployable(self, hostclass):
    return ((hostclass in self._hostclasses and
             is_truthy(self._hostclasses[hostclass].get("deployable"))) or
            hostclass not in self._hostclasses)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def IsNoHost(self):\n if self.no_host:\n return True\n return any([node.no_host for node in self.GetAncestorGroups()])", "def is_opaque(self, classobj):\n try:\n return self.instance_vars[classobj] == []\n except KeyError:\n return False", "def has_ghosts(self):\n return not np.all(self.mesh.discretization.ghosts == 0)", "def include_up_hosts(nmap_host):\n if nmap_host.status == 'up':\n return True\n return False", "def test_no_unlisted_classes_derived_from_Target(self):\n self.skipTest(\"Not sure if test is working properly.\")\n forcebalance_modules=[module[:-3] for module in os.listdir(forcebalance.__path__[0])\n if re.compile(\".*\\.py$\").match(module)\n and module not in [\"__init__.py\"]]\n for module in forcebalance_modules:\n # LPW: I don't think dcdlib should be imported this way.\n print(module)\n if module == \"_dcdlib\": continue\n m = __import__('forcebalance.' + module)\n objs = dir(eval('m.' + module))\n print(objs)\n for obj in objs:\n obj = eval('m.'+module+'.'+obj)\n if type(obj) == abc.ABCMeta:\n implemented = [i for i in forcebalance.objective.Implemented_Targets.values()]\n # list of documented exceptions\n # Basically, platform-independent targets are excluded.\n exclude = ['Target',\n 'AbInitio',\n 'Interaction',\n 'Interaction_GMX',\n 'Liquid',\n 'Lipid',\n 'BindingEnergy',\n 'LeastSquares',\n 'Vibration',\n 'Thermo',\n 'Hydration',\n 'Moments']\n print(obj)\n if obj not in implemented and obj.__name__ not in exclude:\n self.fail(\"Unknown class '%s' not listed in Implemented_Targets\" % obj.__name__)", "def test_no_unlisted_classes_derived_from_Target(self):\n forcebalance_modules=[module[:-3] for module in os.listdir(forcebalance.__path__[0])\n if re.compile(\".*\\.py$\").match(module)\n and module not in [\"__init__.py\"]]\n for module in forcebalance_modules:\n # LPW: I don't think dcdlib should be imported this way.\n self.logger.debug(module)\n # Skip over smirnoff_hack because it is not intended to contain any Target implementations.\n if module in [\"_dcdlib\", \"smirnoff_hack\"]: continue\n m = __import__('forcebalance.' + module)\n objs = dir(eval('m.' 
+ module))\n self.logger.debug(objs)\n for obj in objs:\n obj = eval('m.'+module+'.'+obj)\n if inspect.isclass(obj) and issubclass(obj, forcebalance.target.Target):\n implemented = [i for i in forcebalance.objective.Implemented_Targets.values()]\n # list of documented exceptions\n # Basically, platform-independent targets are excluded.\n exclude = ['Target',\n 'AbInitio',\n 'Interaction',\n 'Interaction_GMX',\n 'Liquid',\n 'Lipid',\n 'BindingEnergy',\n 'LeastSquares',\n 'Vibration',\n 'Hessian',\n 'Thermo',\n 'Hydration',\n 'Moments', \n 'OptGeoTarget',\n 'TorsionProfileTarget']\n self.logger.debug(obj)\n if obj not in implemented and obj.__name__ not in exclude:\n pytest.fail(\"Unknown class '%s' not listed in Implemented_Targets\" % obj.__name__)", "def any_public_tests(self):\n return any([not t.hidden for t in self.tests])", "def include_hostnames(nmap_host):\n if nmap_host.hostnames:\n return True\n return False", "def is_ncar_host():\n hostname = socket.getfqdn()\n \n return any([re.compile(ncar_host).search(hostname) \n for ncar_host in ['cheyenne', 'casper', 'hobart']])", "def filter_dont_care(gt: NDArrayObject, class_name: str) -> bool:\n if gt == \"ignore\":\n return True\n\n if gt == class_name:\n return True\n\n else:\n return False", "def _should_reject_unexamined(self, base_cls):\n result = (\n self.serialize_type(base_cls) not in self.classes_examined\n and base_cls.__module__ not in self.modules_examined\n and not qcore.inspection.is_cython_class(base_cls)\n )\n if not result:\n self.unexamined_base_classes.add(base_cls)\n return result", "def no_classes(mask):\n extrema = ImageStat.Stat(mask).extrema\n r = extrema[0][1]\n g = extrema[1][1]\n b = extrema[2][1]\n\n if r == 0 and g == 0 and b == 0:\n return True\n\n return False", "def no_ext_grid(net):\n\n if net.ext_grid.in_service.sum() + (net.gen.slack & net.gen.in_service).sum() == 0:\n return True", "def predicate(cls: nodes.ClassDef) -> bool:\n if cls.name in CLASS_NAME_SKIPLIST:\n # class looks like an API model class, but it isn't.\n return False\n\n if not cls.name.endswith(\"API\") and \"schema\" not in cls.locals:\n # class does not look like an API model class.\n return False\n\n return True", "def in_host():\n return not in_docker()", "def is_ssh_up_on_all_instances(self, stack_id):\n instances = self.get_instance_public_ips(\n self.cfn.get_stack_instance_ids(stack_id))\n if not instances:\n return False\n if all([ssh.is_ssh_up(i) for i in instances]):\n return True\n return False", "def is_virtual_network_host():\n return False", "def is_builtin_dataclass(_cls: Type[Any]) -> bool:\n import dataclasses\n\n return not hasattr(_cls, '__processed__') and dataclasses.is_dataclass(_cls)", "def is_process_class(node):\n if isinstance(node, ClassDef):\n for b in node.bases:\n if isinstance(b, Name) and b.id == KW_PROCESS_DEF:\n return True\n return False", "def class_is_interesting(name: str):\n if name.startswith('org.chromium.'):\n return True\n return False", "def has_classname(self):\n return self.unpack_word(0x4A) > 0", "def is_ghost(self):\n\t\treturn False", "def _has_all_host_addresses(self, addresses):\n for s_id, s_size in enumerate(self.subnets[1:]):\n for m in range(s_size):\n # +1 to s_id since first subnet is 1\n if str((s_id + 1, m)) not in addresses:\n return False\n return True", "def all_nss(classifications):\n\n return (classifications['warmup'] == 0 and classifications['slowdown'] == 0 and\n classifications['flat'] == 0)", "def __ne__(self, other):\n if not isinstance(other, 
NestedDiscoveredHostDimms):\n return True\n\n return self.to_dict() != other.to_dict()", "def test_doesnt_report_disabled_hosts_as_up_no_queue(self):\n # NOTE(vish): constructing service without create method\n # because we are going to use it without queue\n compute1 = service.Service('host1',\n 'nova-compute',\n 'compute',\n FLAGS.compute_manager)\n compute1.start()\n compute2 = service.Service('host2',\n 'nova-compute',\n 'compute',\n FLAGS.compute_manager)\n compute2.start()\n s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')\n s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')\n db.service_update(self.context, s1['id'], {'disabled': True})\n db.service_update(self.context, s2['id'], {'disabled': True})\n hosts = self.scheduler.driver.hosts_up(self.context, 'compute')\n self.assertEqual(0, len(hosts))\n compute1.kill()\n compute2.kill()", "def all_is_stopped(self):\r\n return all(not p.running for p in self._platforms.values())", "def any_nss(classifications):\n\n return classifications['no steady state'] > 0", "def is_all_in_one(config):\n return len(filtered_hosts(config, exclude=False)) == 1", "def is_standalone():\n\n return not any(pkg in list(main.__dict__.keys()) for pkg in Dccs.packages)" ]
[ "0.6732368", "0.61149806", "0.60162634", "0.598662", "0.5910872", "0.57612085", "0.5761027", "0.5724565", "0.57139426", "0.56720996", "0.56686395", "0.5650766", "0.5641392", "0.56388974", "0.5596937", "0.55439645", "0.55356926", "0.5530277", "0.55288005", "0.5514988", "0.54905534", "0.5474688", "0.54731774", "0.5432726", "0.5423214", "0.5399945", "0.53964704", "0.539353", "0.5364832", "0.5362249" ]
0.66115427
1
Returns the integration test for this hostclass, or None if none exists
def get_integration_test(self, hostclass):
    return (hostclass in self._hostclasses and
            self._hostclasses[hostclass].get("integration_test")) or None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_test(self,test_id):\n for test in self.suite.get_tests():\n if test.id == test_id:\n return test\n return None", "def GetHWTestSuite(self):\n hw_tests = self._run.config['hw_tests']\n if not hw_tests:\n # TODO(milleral): Add HWTests back to lumpy-chrome-perf.\n raise unittest.SkipTest('Missing HWTest for %s' % (self._bot_id,))\n\n return hw_tests[0]", "def test_runner_class(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"test_runner_class\")", "def get_test(self, t_id: int) -> Optional[Tests]:\n try:\n test = self.session.query(Tests).get(t_id)\n return test\n except Exception as excpt:\n self.session.rollback()\n print(f'Could not get test: {excpt}')\n return None", "def get_test_type(self):\n return self.test_type", "def GetTestExtension(self):\n\n return self.test_extension", "def test_runner_class(self) -> str:\n return pulumi.get(self, \"test_runner_class\")", "def get_implementation(self):\n return self.__capabilities[\"IMPLEMENTATION\"]", "def test_get_host(self):\n pass", "def get_test_server():\n\n #TODO: make this lazy initialization thread-safe\n if '__instance' not in globals():\n server_thread = TestServerThread(settings.SELENIUM_TESTSERVER_HOST, settings.SELENIUM_TESTSERVER_PORT)\n server_thread.start()\n server_thread._start_event.wait()\n if server_thread._error:\n raise server_thread._error\n globals()['__instance'] = server_thread\n\n return globals()['__instance']", "def test_suite_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"test_suite_name\")", "def _get_test(self, obj, name, module, globs, source_lines):\n if getattr(obj, '__name__', '') in self.skipped:\n return None\n return doctest.DocTestFinder._get_test(self, obj, name, module,\n globs, source_lines)", "def test_api(self) -> 'test_descriptor.TestApi':\n if not self.running_phase_state:\n raise ValueError('test_api only available when phase is running.')\n if not self._running_test_api:\n self._running_test_api = openhtf.TestApi(\n measurements=measurements.Collection(\n self.running_phase_state.measurements),\n running_phase_state=self.running_phase_state,\n running_test_state=self,\n )\n return self._running_test_api", "def get_id(self):\n return \"unittest_required_plugin\"", "def get_local_hypervisor(self):\n # Look up hypervisors available filtered by my hostname\n host = self.get_my_hostname()\n hyp = self.get_all_hypervisor_ids(filter_by_host=host)\n if hyp:\n return hyp[0]", "def get_instance():\n if not TestConfiguration._instance:\n TestConfiguration._instance = TestConfiguration()\n return TestConfiguration._instance", "def host(self) -> \"IStageHost\":\n return self._values.get(\"host\")", "def getNodeTest(nodeTestId: int):\n\n nodeTestQuery = NodeTest.query.get(nodeTestId)\n\n if nodeTestQuery:\n return nodeTestQueryToObject(nodeTestQuery)\n else:\n return None", "def test_case(self) -> bool:\n return pulumi.get(self, \"test_case\")", "def GetTestWrapper(self):\n return ''", "def health_check_host(self) -> Optional[str]:\n return pulumi.get(self, \"health_check_host\")", "def get_host(self):\r\n return self.host", "def get(self, host):\n return self.__locusts__[host]", "def get_host(self):\n return self.host", "def service(self):\n return self.__stackdriver", "def host(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host\")", "def host(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host\")", "def host(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host\")", "def get(self) -> 
Optional[es.ExpectationSuite]:\n _client = client.get_instance()\n path_params = [\n \"project\",\n _client._project_id,\n \"featurestores\",\n self._feature_store_id,\n \"featuregroups\",\n self._feature_group_id,\n \"expectationsuite\",\n ]\n\n return es.ExpectationSuite.from_response_json(\n _client._send_request(\"GET\", path_params)\n )", "def driver(self):\r\n ext = self.extensions[0]\r\n return ext.obj if ext.obj else ext.plugin" ]
[ "0.6346583", "0.6162359", "0.5884836", "0.5876703", "0.55224764", "0.5468428", "0.5372038", "0.529812", "0.52784747", "0.52683514", "0.5250328", "0.52476835", "0.5240323", "0.5180137", "0.51473355", "0.51330215", "0.51204187", "0.5118088", "0.5116481", "0.50992554", "0.5088099", "0.50844663", "0.5072845", "0.5072533", "0.5064881", "0.5057838", "0.5057838", "0.5057838", "0.50508636", "0.5039868" ]
0.8695572
0
Promote AMI to the specified stage and, conditionally, make it executable by the production account if the AMI is staged as tested.
def _promote_ami(self, ami, stage):
    prod_baker = self._disco_bake.option("prod_baker")
    promote_conditions = [
        stage == "tested",
        prod_baker,
        ami.tags.get("baker") == prod_baker,
    ]
    try:
        self._disco_bake.promote_ami(ami, stage)
        if all(promote_conditions):
            self._disco_bake.promote_ami_to_production(ami)
    except:
        logging.exception("promotion failed")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stage(self, stage: osbuild.Stage):", "def deploy():\n stage(branch='live', role='live')", "def setup(self, stage: Optional[str] = None) -> None:", "def deploy():\n test()\n if not env.is_staging:\n backup()\n prepare()\n restart_api()", "def Stage(self, descriptor, app_dir, runtime, environment):\n command = self.registry.get((runtime, environment))\n\n if not command:\n # Many runtimes do not require a staging step; this isn't a problem.\n log.debug(('No staging command found for runtime [%s] and environment '\n '[%s].'), runtime, environment.name)\n return\n\n command.EnsureInstalled()\n return command.Run(self.staging_area, descriptor, app_dir)", "def _stage(self):\n\n local_source_path = join(BespokeGlobals.ABS_LOCAL_TOOLS,\n self._tool.install_properties['source_file'])\n\n self._remote_target_path = join(self._sut.bespoke_root,\n BespokeGlobals.TOOLS,\n self._tool.install_properties['source_file'])\n\n if isfile(local_source_path):\n self._staf_file_copy(local_source_path, self._remote_target_path)\n else:\n raise CoreError('Failed to stage tool \"{0}\" on remote machine! The file/directory '\n '\"{1}\" does not exist!'.format(self._tool.name, local_source_path))", "def stage():\n _setup_env()\n\n if not 'stage' in _config:\n abort('Could not find \"stage\" in config file')\n\n # Make sure cdn exists\n exists(dirname(env.cdn_path), required=True)\n\n # Ask user for a new version\n _config['version'] = git.prompt_tag('Enter a new version number',\n unique=True)\n\n # Build version\n # use execute to allow for other implementations of 'build'\n execute('build')\n\n # Commit/push/tag\n with lcd(env.project_path):\n with settings(warn_only=True):\n local('git add build')\n # support builds where there's no change; sometimes comes up when\n # reusing a tag because of an unexpected problem\n with settings(warn_only=True):\n msg = local('git commit -m \"Release %(version)s\"' % _config,capture=True)\n if 'nothing to commit' in msg:\n warn(msg)\n warn('continuing anyway')\n elif not msg.startswith('[master'):\n abort(\"Unexpected result: %s\" % msg)\n local('git push')\n\n git.push_tag(_config['version'])\n\n # Copy to local CDN repository\n cdn_path = join(env.cdn_path, _config['version'])\n clean(cdn_path)\n\n for r in _config['stage']:\n static.copy(_config, [{\n \"src\": r['src'],\n \"dst\": cdn_path, \"regex\": r['regex']}])\n\n # Create zip file in local CDN repository\n _make_zip(join(cdn_path, '%(name)s.zip' % _config))", "def process_deploybuild ( vpc_conn,\n ec2_conn,\n elb_conn,\n cloudwatch_conn,\n r53_conn,\n s3_infra_conn,\n vpc,\n base_name,\n app_name,\n app_type,\n region_name,\n aws_account_type,\n params ) :\n APP_NAME = app_name.upper( )\n master_tomcat_ami_name = 'Master-Tomcat.v8'\n master_nodejs_ami_name = 'Master-NodeJS.v6'\n master_python_ami_name = 'Master-Python.v2'\n deployment_ami_name = params.get( 'source-ami' )\n deployment_secgrp = get_deployment_secgrp_name( )\n deployment_keypair = get_keypair_name( aws_account_type, vpc.region.name, 'deployment' )\n instance_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n deployment_keypair_file = os.environ[ 'HOME' ] + '/.ssh/' + deployment_keypair + '.pem'\n wait_on_launch = params.get( 'wait-on-launch', 'YES' ) == 'YES'\n\n ##\n ## Find the correct AMI to use for deployment\n ##\n if not deployment_ami_name or len( deployment_ami_name ) < 1 :\n deployment_ami_name = get_current_ami( s3_infra_conn, region_name, get_env_type( base_name ), app_name )\n if not deployment_ami_name :\n print 
\"No AMI found, defaulting to master AMI!\"\n if app_type == 'jee' :\n deployment_ami_name = master_tomcat_ami_name\n elif app_type == 'nodejs' :\n deployment_ami_name = master_nodejs_ami_name\n elif app_type == 'python' :\n deployment_ami_name = master_python_ami_name\n\n print \"Deployment ami to be used: \" + deployment_ami_name\n deployment_ami = get_ami_by_name( ec2_conn, deployment_ami_name )\n if not deployment_ami :\n print \"Could not find AMI to use for deployment! \" + deployment_ami_name\n sys.exit( 2 )\n\n ##\n ## Launch the deployment server to deploy the new warfile to\n ##\n print \"Launching deployment instance.\"\n deploy_i = launch_instance_nonvpc( ec2_conn = ec2_conn,\n ami = deployment_ami,\n base_name = base_name,\n instance_type = APP_NAME + '-DEPLOYMENT',\n keypair = deployment_keypair,\n machine_type = 't1.micro',\n security_group = deployment_secgrp )\n deployment_servername = deploy_i.public_dns_name\n print \"Waiting for SSH to be available on deployment server\"\n sshd_started = wait_on_server_sshd( deployment_servername, deployment_keypair_file )\n if not sshd_started :\n print \"SSH is not available after a long time! \" + deployment_servername\n sys.exit( 3 )\n\n ##\n ## Update the instance software before deploying the new code.\n ##\n update_os = params.get( 'update-os', 'YES' ) == 'YES'\n if update_os :\n if app_type == 'jee' :\n os_update_cmd = 'sudo yum -y update'\n elif app_type == 'nodejs' :\n os_update_cmd = 'sudo yum update -y --enablerepo=epel'\n elif app_type == 'python' :\n os_update_cmd = 'sudo yum -y update'\n\n ssh_call( deployment_servername, deployment_keypair_file, os_update_cmd )\n\n # Deploy the code to the server based on app_type\n if app_type == 'jee' :\n print \"Waiting for HTTP to be available on deployment server\"\n tomcat_started = wait_on_server_httpd( deployment_servername )\n if not tomcat_started :\n print \"Tomcat is not available after a long time! 
\" + deployment_servername\n sys.exit( 4 )\n\n ##\n ## Deploy the warfile to the deployment server\n ##\n print \"Deploying warfile to deployment server\"\n deploy_warfile ( deployment_servername,\n deployment_keypair_file,\n app_name,\n params[ 'warfile-path' ] )\n\n elif app_type == 'nodejs' :\n print \"Stopping existing node instance.\"\n ssh_call( deployment_servername, deployment_keypair_file, 'sudo /etc/init.d/nodejs stop' )\n print \"Deploying source files to deployment server\"\n src_dir = params[ 'src-dir' ]\n tgt_dir = '/usr/share/node'\n status = bulk_upload( deployment_servername, deployment_keypair_file, src_dir, tgt_dir )\n if status != 0 :\n print \"Unable to upload source files to the deployment server!\"\n sys.exit( 5 )\n print \"Updating project dependencies on deployment server\"\n status = ssh_call( deployment_servername, deployment_keypair_file, 'cd ' + tgt_dir + ' && sudo npm install' )\n if status != 0 :\n print \"Unable to update project dependencies on deployment server!\"\n sys.exit( 5 )\n print \"If found bower.json file, install bower and download bower resource.\"\n status = ssh_call(deployment_servername, deployment_keypair_file, 'cd ' + tgt_dir + ' && [ -f bower.json ] && (yes | sudo bower install --allow-root) || echo \"Not found bower.json file\"')\n if status != 0 :\n print \"Action of node js plugin bower failed!\"\n sys.exit( 5 )\n elif app_type == 'python':\n src_dir = params[ 'src-dir' ]\n tgt_dir = '/usr/share/scripts/s3-delivery-agent/'\n print \"Creating app folder\"\n ssh_call( deployment_servername, deployment_keypair_file, 'sudo mkdir -p ' + tgt_dir )\n print \"Deploying source files to deployment server\"\n status = bulk_upload( deployment_servername, deployment_keypair_file, src_dir, tgt_dir )\n if status != 0 :\n print \"Unable to upload source files to the deployment server!\"\n sys.exit( 5 )\n print \"Stopping python app.\"\n ssh_call( deployment_servername, deployment_keypair_file, 'cd ' + tgt_dir + '; python fatcontroller/command/stop.py' )\n print \"Updating project dependencies on deployment server\"\n status = ssh_call( deployment_servername, deployment_keypair_file, 'cd ' + tgt_dir + ' && sudo pip install -r config/requirements.txt' )\n if status != 0 :\n print \"Unable to update project dependencies on deployment server!\"\n sys.exit( 5 )\n print \"Start python app\"\n print params\n print \"Current environment type: \" + get_env_type( base_name )\n status = ssh_call(deployment_servername, deployment_keypair_file, 'export SERVER_ENV=' + get_env_type( base_name ) + '; cd ' + tgt_dir + '; sudo python fatcontroller/command/startup.py ' + get_env_type( base_name ))\n if status != 0 :\n print \"Starting python app failed.\"\n sys.exit( 5 )\n\n\n ##\n ## Create AMI\n ##\n print \"Creating AMI from deployment server.\"\n timestamp = get_current_datetime_string( )\n new_ami_name = base_name + '-' + APP_NAME + '-' + timestamp\n new_ami = create_ami_from_instance( aws_account_type, ec2_conn, deploy_i, new_ami_name )\n if not new_ami :\n print \"Could not create new AMI!\"\n sys.exit( 5 )\n\n ##\n ## Remove the deployment instance, since we no longer need it.\n ##\n print \"Terminating deployment instance.\"\n deploy_i.terminate( )\n\n ##\n ## Launch new instance\n ##\n print \"Find secgrp for \" + base_name + \" and \" + APP_NAME + \" result: \"\n secgrp = find_group( ec2_conn, base_name, APP_NAME )\n\n instance_subnet_cidr = params[ 'subnet-cidr' ]\n subnets = vpc_conn.get_all_subnets( filters = [ ( \"vpcId\", [ vpc.id ] ),\n ( \"cidrBlock\", 
[ instance_subnet_cidr ] ) ] )\n userdata = get_userdata( app_type, base_name, app_name )\n \n old_instance = find_instance_by_type(ec2_conn,base_name,app_name)\n \n print \"Launching new instance.\"\n instance = launch_instance_vpc( ec2_conn,\n new_ami,\n base_name = base_name,\n instance_type = APP_NAME,\n keypair = instance_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = secgrp.id,\n subnet_id = subnets[ 0 ].id,\n user_data = userdata,\n public_ip = True,\n wait_for_running = wait_on_launch )\n\n print \"Storing new AMI as the current.\"\n save_current_ami( s3_infra_conn, region_name, get_env_type( base_name ), app_name, new_ami.name )\n\n ## if there is a internal or public load balancer, re-register the new instance. Otherwise, update route53 dns record.\n ## added by yliu, 2015/06/16\n load_balancer_internal = get_elb_name( base_name, app_name )\n elb_internal = find_elb( elb_conn, load_balancer_internal )\n\n load_balancer_public = get_elb_name( base_name, app_name + '-PB' )\n elb_public = find_elb( elb_conn, load_balancer_public )\n \n new_instance_ids = [ instance.id ]\n\n if elb_internal is not None or elb_public is not None:\n if elb_internal is not None:\n print \"Adding the new app instances into the internal load balancer.\"\n\n is_terminator_now = True\n if elb_public is not None:\n is_terminator_now = False\n\n status = swap_elb_instances( elb_conn = elb_conn,\n elb = elb_internal,\n new_instance_ids = new_instance_ids,\n terminate_old_instances = is_terminator_now,\n ec2_conn = ec2_conn,\n cloudwatch_conn = cloudwatch_conn,\n swap_smoothly = wait_on_launch )\n\n if elb_public is not None:\n print \"Adding the new app instances into the public load balancer.\"\n status = swap_elb_instances( elb_conn = elb_conn,\n elb = elb_public,\n new_instance_ids = new_instance_ids,\n terminate_old_instances = True,\n ec2_conn = ec2_conn,\n cloudwatch_conn = cloudwatch_conn,\n swap_smoothly = wait_on_launch )\n print \"Added the new app instances into the public load balancer.\"\n\n else :\n public_dns_alias = create_dns_name( base_name, app_name )\n internal_dns_alias = create_internal_dns_name( base_name, app_name )\n \n if old_instance :\n print \"Terminating old instance.\"\n old_instance.terminate( )\n\n print \"Configuring DNS entry for new instance.\"\n if elb_public is not None:\n set_dns_cname( r53_conn, public_dns_alias, instance.public_dns_name )\n \n if elb_internal is not None:\n set_dns_atype( r53_conn, internal_dns_alias, instance.private_ip_address )\n\n print \"New instance is now available at: \" + public_dns_alias\n print \"New instance internal DNS name: \" + internal_dns_alias", "def provision(vm='', env=''):\n local( main_dir + '/vagrant/bin/vm.sh provision ' + str(vm) + ' ' + str(env) )\n #result = local( main_dir + '/vagrant/bin/vm.sh provision ' + str(vm) + ' ' + str(env) )\n #if result != '0'\n # abort( \"Failed test - Aborting\")", "def launch_instance_nonvpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n security_group,\n machine_type = 'm1.small',\n user_data = None,\n wait_for_running = True ) :\n instance_r = ami.run( key_name = keypair,\n instance_type = machine_type,\n security_groups = [ security_group ],\n user_data = user_data )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n if wait_for_running :\n running = wait_on_object_state( instance, 'running', failure_state = 'terminated' )\n if not running :\n print 
\"Deployment instance still not up after long period of time! Exiting...\"\n sys.exit( 3 )\n\n return instance", "def install_sm_local_dependencies(framework, job_type, image, ec2_conn, ec2_instance_ami):\n python_invoker = get_python_invoker(ec2_instance_ami)\n # Install custom packages which need to be latest version\"\n # using virtualenv to avoid package conflicts with the current packages\n ec2_conn.run(f\"sudo apt-get install virtualenv -y \")\n ec2_conn.run(f\"virtualenv env --python {python_invoker}\")\n ec2_conn.run(f\"source ./env/bin/activate\")\n if framework == \"pytorch\":\n # The following distutils package conflict with test dependencies\n ec2_conn.run(\"sudo apt-get remove python3-scipy python3-yaml -y\")\n ec2_conn.run(f\"sudo {python_invoker} -m pip install -r requirements.txt \", warn=True)", "def stage_dev():\n _setup_env()\n\n if not 'stage' in _config:\n abort('Could not find \"stage\" in config file')\n\n # Make sure cdn exists\n exists(dirname(env.cdn_path), required=True)\n\n # Build version\n build()\n\n # Copy to local CDN repository\n cdn_path = join(env.cdn_path, 'dev')\n clean(cdn_path)\n\n for r in _config['stage']:\n static.copy(_config, [{\n \"src\": r['src'],\n \"dst\": cdn_path, \"regex\": r['regex']}])\n\n # Create zip file in local CDN repository\n _make_zip(join(cdn_path, '%(name)s.zip' % _config))", "def create_ami_from_instance ( aws_account_type,\n ec2_conn,\n instance,\n ami_name,\n ami_description = None,\n wait_for_available = True ) :\n ami_id = instance.create_image( ami_name, ami_description )\n ami = aws_wait( ec2_conn.get_all_images, ami_id, [ ami_id ] )\n if not ami :\n print \"AMI is not available after a long time! \" + ami.name\n return None\n\n if wait_for_available :\n ami_available = wait_on_object_state( ami, 'available' ,max_wait=3600)\n if not ami_available :\n print \"AMI is not available after a long time! 
\" + ami.name\n return None\n\n # Allow other AWS accounts the ability to see this AMI.\n if aws_account_type == 'esp-nonprod' :\n priv_account_id = esp_prod[ 'accountid' ]\n else :\n priv_account_id = esp_nonprod[ 'accountid' ]\n\n ami.set_launch_permissions( user_ids = [ priv_account_id ] )\n\n return ami", "def _stage_pkg(self):\n context = self._config.context\n context.package.file = os.path.basename(context.package.arg)\n root_path = self._distro.root_mountspec.mountpoint\n stage_path = os.path.join(root_path, context.package.dir.lstrip('/'))\n context.package.full_path = os.path.join(stage_path, context.package.file)\n try:\n if any(protocol in context.package.arg for protocol in ['http://', 'https://']):\n self._download_pkg(context)\n else:\n self._move_pkg(context)\n except Exception:\n errstr = 'Exception encountered while staging package'\n log.critical(errstr)\n log.debug(errstr, exc_info=True)\n return False\n # reset to chrooted file path\n context.package.arg = os.path.join(context.package.dir, context.package.file)\n return True", "def deploy(\n context, instance, user=get_local_user(), initial=False, stack=None, branch=BRANCH,\n):\n remote = True\n\n if initial:\n clone(context, instance, user, branch)\n else:\n backup(context, user, remote, instance, stack)\n\n update(context, user, remote, instance, branch)\n up(context, user, remote, instance, stack)", "def prepare_image_for_deploy(runtime: \"mlrun.runtimes.BaseRuntime\"):\n pass", "def on(stage):\n localhosts = ['localhost', '127.0.0.1']\n env.stage = stage\n env.context = get_context()\n hosts = env.context['hosts']\n if stage == 'dev' and len(hosts) == 1 and hosts[0] in localhosts:\n env.hosts = []\n else:\n env.hosts = env.context['hosts']", "def main(cmd_line=None):\n release_config = 'CentOS-8/master.yaml'\n logging.basicConfig(level=logging.DEBUG)\n log = logging.getLogger('dlrnapi_promoter')\n log.setLevel(logging.DEBUG)\n\n log.info(\"Checking for log directory\")\n log_file = os.path.expanduser(get_log_file('staging',\n release_config))\n log_dir = \"/\".join(log_file.split(\"/\")[:-1])\n if not os.path.exists(log_dir):\n log.info(\"Creating log directory : {}\".format(log_dir))\n os.makedirs(log_dir)\n config_builder = PromoterConfigFactory(config_class=StageConfig)\n\n logging.basicConfig(level=logging.DEBUG)\n log = logging.getLogger(\"promoter-staging\")\n log.setLevel(logging.DEBUG)\n\n args = parse_args(config_builder.global_defaults, cmd_line=cmd_line)\n\n if hasattr(args, \"release_config\"):\n release_config = args.release_config\n config_builder = PromoterConfigFactory(config_class=StageConfig,\n **{'log_file': log_file})\n\n config = config_builder(\"staging\", release_config,\n validate=None)\n # Export dlrn password\n os.environ['DLRNAPI_PASSWORD'] = config.dlrn['server']['password']\n staged_env = StageOrchestrator(config)\n args.handler(staged_env)\n\n if cmd_line is not None:\n return config", "def deploy(self, image_name, ip, flavor='m1.small'):\n body_value = {\n \"port\": {\n \"admin_state_up\": True,\n \"name\": self.name + '_provision',\n \"network_id\": os_utils.get_network_id(self.nova_api, 'provision_bob'),\n 'fixed_ips': [{'ip_address': ip}]}}\n response = self.neutron.create_port(body=body_value)\n self._provision_port_id = response['port']['id']\n self.mac = response['port']['mac_address']\n\n image_id_to_boot_from = os_utils.get_image_id(self.nova_api, image_name)\n flavor_id = os_utils.get_flavor_id(self.nova_api, flavor)\n # TODO(Gonéri): We don't need keypair for the BM nodes\n 
keypair_id = os_utils.get_keypair_id(self.nova_api, self._keypair)\n # Ensure with get DHCP lease on the provision network first\n nics = [{'port-id': self._provision_port_id}]\n\n self._os_instance = os_provisioner.build_openstack_instance(\n self.nova_api,\n self.name,\n image_id_to_boot_from,\n flavor_id,\n keypair_id,\n nics)\n\n if not self._os_instance:\n LOG.error(\"deployment has failed\")\n raise Exception()\n\n os_provisioner.add_provision_security_group(self.nova_api)\n os_utils.add_security_groups(self._os_instance, ['provision'])\n os_utils.add_security_groups(self._os_instance, self._security_groups)\n LOG.info(\"add security groups '%s'\" % self._security_groups)\n LOG.info(\"instance '%s' ready to use\" % self.name)\n\n # the instance should be off for Ironic\n self._os_instance.stop()", "def dev_up():\n _with_deploy_env(['./bin/develop up'])", "def test_stage_pre_boot(self, mock_stage_pre_boot):\n app = MDFakeFSTestSite(\n \"MDWeb\",\n app_options={}\n )\n app.start()\n\n self.assertTrue(mock_stage_pre_boot.called)", "def process_deployapp ( vpc_conn,\n ec2_conn,\n elb_conn,\n cloudwatch_conn,\n s3_infra_conn,\n r53_conn,\n vpc,\n base_name,\n base_topicarn,\n app_name,\n app_type,\n region_name,\n aws_account_type,\n params,\n monitor_params = None ) :\n target_env = base_name\n APP_NAME = app_name.upper( )\n deployment_ami_name = params.get( 'source-ami' )\n source_env = params[ 'source-env' ]\n TARGET_ENV = target_env.upper( )\n SOURCE_ENV = source_env.upper( )\n load_balancer = get_elb_name( target_env, app_name )\n instance_name = get_instance_name( target_env, app_name )\n wait_on_launch = params.get( 'wait-on-launch', 'YES' ) == 'YES'\n if not monitor_params :\n monitor_params = params.get( 'monitors' )\n\n instance_secgrp_name = get_secgrp_name( target_env, app_name )\n instance_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n\n ##\n ## Find the correct AMI to use for deployment\n ##\n if not deployment_ami_name or len( deployment_ami_name ) < 1 :\n deployment_ami_name = get_current_ami( s3_infra_conn, region_name, get_env_type( SOURCE_ENV ), app_name )\n if not deployment_ami_name :\n print \"Could not find AMI to use for deployment! \" + deployment_ami_name\n sys.exit( 2 )\n\n deployment_ami = get_ami_by_name( ec2_conn, deployment_ami_name )\n if not deployment_ami :\n print \"Could not find AMI to use for deployment! 
\" + deployment_ami_name\n sys.exit( 2 )\n\n subnets = get_vpc_subnets( vpc_conn, vpc, params.get( 'subnet-type', 'PRIVATE' ) )\n secgrps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ instance_secgrp_name ] } )\n \n userdata = get_userdata( app_type, TARGET_ENV, app_name )\n \n new_instances = []\n num_instances = int( params.get( 'num-instances', len( subnets ) ) )\n if num_instances > len( subnets ) :\n num_instances = len( subnets )\n\n while num_instances > 0 :\n instance = launch_instance_vpc( ec2_conn,\n deployment_ami,\n base_name = base_name,\n instance_type = app_name,\n keypair = instance_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = secgrps[ 0 ].id ,\n subnet_id = subnets[ num_instances - 1 ].id,\n user_data = userdata,\n public_ip = False,\n wait_for_running = wait_on_launch )\n new_instances.append( instance )\n\n if monitor_params :\n print \"Setting alarms on the instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, instance.id, APP_NAME, base_topicarn, monitor_params )\n\n num_instances -= 1\n\n new_instance_ids = [ i.id for i in new_instances ]\n\n if ( wait_on_launch ) :\n print \"Waiting for instances to be ready\"\n aws_waits( ec2_conn.get_only_instances, new_instance_ids )\n\n print \"Creating AMI from instance server.\"\n timestamp = get_current_datetime_string( )\n new_ami_name = target_env + '-' + APP_NAME + '-' + timestamp\n ami_instance = new_instances[ 0 ]\n if not wait_on_launch :\n # We must wait for at least the ami instance to be available so we can create a new AMI from it.\n wait_on_object_state( ami_instance, 'running' )\n new_ami = create_ami_from_instance( aws_account_type, ec2_conn, new_instances[ 0 ], new_ami_name )\n if not new_ami :\n print \"Could not create new AMI!\"\n sys.exit( 5 )\n\n print \"Storing new AMI as the current.\"\n save_current_ami( s3_infra_conn, region_name, get_env_type( TARGET_ENV ), app_name, new_ami.name )\n\n print \"Adding the new app instances into the load balancer.\"\n elb = find_elb( elb_conn, load_balancer )\n status = swap_elb_instances( elb_conn = elb_conn,\n elb = elb,\n new_instance_ids = new_instance_ids,\n terminate_old_instances = True,\n ec2_conn = ec2_conn,\n cloudwatch_conn = cloudwatch_conn,\n swap_smoothly = wait_on_launch )\n if not status :\n print \"WARNING: Not all new app instances came up in the load balancer! Check the load balancer.\"\n\n print \"Deployment complete.\"", "def deploy(upgrade=False):\n print(\"Deploying project on {} !\".format(env.stage))\n execute('system.setup')\n execute('git.checkout')\n execute('virtualenv.setup')\n execute('django.setup')\n execute('cron.setup')\n execute('uwsgi.setup')\n execute('supervisor.setup')\n execute('nginx.setup')", "def provision(project, node, img, network, nic):\n data = {constants.PROJECT_PARAMETER: project,\n constants.NODE_NAME_PARAMETER: node,\n constants.IMAGE_NAME_PARAMETER: img,\n constants.NETWORK_PARAMETER: network,\n constants.NIC_PARAMETER: nic}\n res = requests.put(_url + \"provision/\", data=data,\n auth=(_username, _password))\n click.echo(res.content)", "def set_stage(stage):\n try:\n filename = os.path.join(get_var('SITE'), \".stage\")\n f = open(filename, \"w\")\n f.write(\"%s\\n\" % stage)\n f.close()\n logger.debug(\"set stage: %s\" % (stage))\n except:\n raise AssertionError(\"Unable to save setup/teardown stage! 
%s\" % (sys.exc_info()[1]))\n return stage", "def deploy_stack():\n build = \"sam build --use-container --manifest src/images/requirements.txt\"\n local(build)\n\n #package = f\"sam package --template-file template.yaml --output-template-file \\\n # packaged.yaml --s3-bucket {env.bucket_name} --region {env.aws_region}\"\n #local(package)\n\n deploy = f\"sam deploy --stack-name storge-machine-service \\\n --s3-bucket {env.bucket_name}\\\n --parameter-overrides env=dev --capabilities CAPABILITY_IAM CAPABILITY_AUTO_EXPAND --region {env.aws_region}\"\n #deploy = \"sam deploy\"\n local(deploy)", "def _pre_provisioning_steps(self, context, res_id, data, res_inventory):\n LOG.info(\"[%s] Executing pre provisioning steps\" % res_id)\n expected_state = [eon_const.EON_RESOURCE_STATE_IMPORTED]\n state_in_db = res_inventory.get(eon_const.EON_RESOURCE_STATE)\n # if state not imported raise error\n self.validator.validate_state(expected_state, state_in_db)\n # if resource not baremetal, raise error\n type_in_db = res_inventory.get(eon_const.EON_RESOURCE_TYPE)\n self.validator.validate_type(type_in_db,\n eon_const.EON_RESOURCE_TYPE_BAREMETAL)\n next_state = eon_const.RESOURCE_STATE_PROVISON_INITIATED\n self.virt_utils.update_prop(context, res_id,\n eon_const.EON_RESOURCE_STATE,\n next_state)\n res_inventory[\"state\"] = next_state\n\n # update the type from baremetal to given resource type\n type_ = data[eon_const.EON_RESOURCE_TYPE]\n self.virt_utils.update_prop(context, res_id, 'type',\n type_)\n res_inventory[\"type\"] = data[\"type\"]\n LOG.debug(\"[%s] pre provisioning comple\" % res_id)", "def then_app_running_stage(context):\n result = context.result\n result | should.equal('Success').desc(\"Application is reachable in the Stage stage.\")", "def execute_sagemaker_remote_tests(image):\n pytest_command, path, tag, job_type = generate_sagemaker_pytest_cmd(image, SAGEMAKER_REMOTE_TEST_TYPE)\n context = Context()\n with context.cd(path):\n context.run(f\"virtualenv {tag}\")\n with context.prefix(f\"source {tag}/bin/activate\"):\n context.run(\"pip install -r requirements.txt\", warn=True)\n res = context.run(pytest_command, warn=True)\n metrics_utils.send_test_result_metrics(res.return_code)\n if res.failed:\n raise DLCSageMakerRemoteTestFailure(\n f\"{pytest_command} failed with error code: {res.return_code}\\n\"\n f\"Traceback:\\n{res.stdout}\"\n )", "def transition_model_version_stage(self, stage):\n try:\n for model in self.client.search_model_versions(f\"name='{self.model_name}'\"):\n if model.current_stage in ['Staging', 'Production']:\n self.client.transition_model_version_stage(\n name=model.name,\n version=model.version,\n stage=\"Archived\"\n )\n logging.info(f'Transitioning {model.name}/{model.version} to Archived')\n\n self.client.transition_model_version_stage(\n name=self.model_name,\n version=self.model_version,\n stage=stage\n )\n logging.info(f'Model transitioned to {stage}')\n\n except Exception as e:\n logging.error(e)" ]
[ "0.600044", "0.58983445", "0.5597183", "0.55506766", "0.55401236", "0.5401714", "0.5389094", "0.53601116", "0.5331389", "0.52924347", "0.526997", "0.52426094", "0.5238283", "0.5174726", "0.5095269", "0.5092394", "0.50896496", "0.5085712", "0.5071455", "0.50698596", "0.50681335", "0.5065932", "0.5060449", "0.50470936", "0.50415754", "0.5012595", "0.5008688", "0.50085163", "0.49992743", "0.49952036" ]
0.7794168
0
Line is correctly split and missing/corrupted fields are checked.
def test_read_line(self):
    expected_data = ['\"lu, jr\"','ming-yuan','\"DRUG,1\"',135.999,True,3]
    input_string = '001,\"LU, JR\",MING-YUAN,\"DRUG,1\",135.999\n'
    data = read_line(input_string)
    self.assertEqual(expected_data[0],data[0])
    self.assertEqual(expected_data[1],data[1])
    self.assertEqual(expected_data[2],data[2])
    self.assertAlmostEqual(expected_data[3],data[3])
    self.assertEqual(expected_data[4],data[4])
    self.assertAlmostEqual(expected_data[5],data[5])

    # Check for odd numbers of quotation marks
    input_string = '001,\"LU\",\"MING-YUAN,DRUG1,135\n'
    data = read_line(input_string)
    self.assertFalse(data[4])

    # Check for missing fields
    input_string = '001,,MING-YUAN,DRUG1,135\n'
    data = read_line(input_string)
    self.assertFalse(data[4])

    input_string = '001,LU,MING-YUAN,DRUG1,\n'
    data = read_line(input_string)
    self.assertFalse(data[4])

    # Check for corrupted fields
    input_string = '001x,LU,MING-YUAN,DRUG1,135\n'
    data = read_line(input_string)
    self.assertFalse(data[4])

    input_string = '001,LU,MING-YUAN,DRUG1,1ag5\n'
    data = read_line(input_string)
    self.assertFalse(data[4])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_Lines(self):\n\n pass", "def check_record(idline,nclline,sepline,qualiline):\n return check_idline(idline) and check_sepline(sepline)", "def is_line(self): \n return False", "def check_line(self):\n if not self.hosts and not self.line:\n self.msg(\"There is no line here. You can create one with +line/createline.\")\n return\n return True", "def testBadLine(self):\n\n self.assertRaises(\n ValueError,\n tools._trackInfo,\n 'not a real line'\n )", "def checkLineStandardCompliance(line):\n if len(line) != 5:\n print(line + \" HAS WRONG NUMBER OF COLUMNS: \" + str(len(line)))\n exit(5)", "def line_is_valid(line):\n if '-' in map(lambda item: item.strip(), line.strip().split(\";\")):\n return False\n else:\n return True", "def split_line_robust(line):\n\n line_split0 = [x.rstrip('\\n') for x in line.split(' ') if x]\n line_split1 = [x.split('\\t') for x in line_split0 if x]\n line_split = []\n for l_one in line_split1:\n for l_two in l_one:\n if l_two: line_split.append(l_two)\n return(line_split)", "def validate_line(self, line):\n splitline = line.split('\\t')\n if len(splitline) is not 9:\n return []\n if not \"ID\" in splitline[8]:\n return []\n if not int(splitline[3]) <= int(splitline[4]):\n return []\n # Everything except genes must have parent id\n if not \"Parent\" in splitline[8] and not splitline[2] == \"gene\":\n return []\n return splitline", "def testSplitLine_one_split():\n line = np.array([1, 2, 3, 3, 3, 4, 5])\n split_lines = splitLine(line)\n if np.all(np.concatenate(split_lines) == line):\n print(str(np.concatenate(split_lines)), \" == \", str(line))\n else:\n print(str(np.concatenate(split_lines)), \" != \", str(line))", "def test_parse_no_fields(self):\n received = self._p.parse_line(self._line)\n expected = {}\n msg = 'Line parse with no fields should return None'\n self.assertDictEqual(received, expected, msg)", "def check_line(self, line):\n line = line.rstrip('\\r\\n')\n try:\n line = line.decode('utf8')\n except:\n pass\n return self.rules['all'].validate(line)", "def process_line(line):\n\n name_comp_list = []\n givenname_comp_list = []\n surname_comp_list = []\n geocode_comp_list = []\n locality_comp_list = []\n date1_comp_list = []\n date2_comp_list = []\n\n # Split the line into the basic fields - - - - - - - - - - - - - - - - - - -\n #\n if (config.in_file_type in ['CSV','CSVQ','TAB','TABQ']):\n # Comma or tabulator separated\n try:\n line_list = config.line_parser.parse(line)\n except:\n log_message('CSV line parsing failed with inout: '+line,'err')\n\n if (len(line_list) < config.input_len):\n log_message('Input line does not contain enough fields,' +\\\n 'fill up with empty fields','warn')\n while (len(line_list) < config.input_len):\n line_list.append('')\n\n config.curr_line_list = line_list # Save current line list\n\n # Extract fields into different component lists - - - - - - - - - - - - - -\n #\n if (config.input_component['name'] != []): # Extract name fields\n for i in config.input_component['name']:\n name_comp_list.append(line_list[i])\n\n else: # Extract givenname and surname into separate components - - - - - -\n if (config.input_component['givenname'] != []): # Extract g-name fields\n for i in config.input_component['givenname']:\n givenname_comp_list.append(line_list[i])\n\n if (config.input_component['surname'] != []): # Extract surname fields\n for i in config.input_component['surname']:\n surname_comp_list.append(line_list[i])\n\n if (config.input_component['geocode'] != []): # Extract geocode fields\n for i in 
config.input_component['geocode']:\n geocode_comp_list.append(line_list[i])\n\n if (config.input_component['locality'] != []): # Extract locality fields\n for i in config.input_component['locality']:\n locality_comp_list.append(line_list[i])\n\n if (config.input_component['date1'] != []): # Extract date1 fields\n for i in config.input_component['date1']:\n date1_comp_list.append(line_list[i])\n\n if (config.input_component['date2'] != []): # Extract date2 fields\n for i in config.input_component['date2']:\n date2_comp_list.append(line_list[i])\n\n elif (config.in_file_type == 'COL'): # Column based input file - - - - - - -\n\n if (len(line) < config.input_len):\n log_message('Input line is not long enough, fill up with spaces','warn')\n line += ' '*(config.input_len-len(line))\n\n if (config.input_component['name'] != []): # Extract name fields\n for (col_start,length) in config.input_component['name']:\n name_comp_list.append(line[col_start,col_start+length])\n\n else: # Extract givenname and surname into separate components - - - - - -\n if (config.input_component['givenname'] != []): # Extract g-name fields\n for (col_start,length) in config.input_component['givenname']:\n givenname_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['surname'] != []): # Extract surname fields\n for (col_start,length) in config.input_component['surname']:\n surname_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['geocode'] != []): # Extract geocode fields\n for (col_start,length) in config.input_component['geocode']:\n geocode_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['locality'] != []): # Extract locality fields\n for (col_start,length) in config.input_component['locality']:\n locality_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['date1'] != []): # Extract date1 fields\n for (col_start,length) in config.input_component['date1']:\n date1_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['date2'] != []): # Extract date2 fields\n for (col_start,length) in config.input_component['date2']:\n date2_comp_list.append(line[col_start,col_start+length])\n\n # elif (config.in_file_type == 'SQL'): # - - - - - - - - - - - - - - - - - -\n\n ################################\n # Add later: SQL database access\n ################################\n\n msg = [' Component basic field lists:', \\\n ' Name: '+str(name_comp_list), \\\n ' Given name: '+str(givenname_comp_list), \\\n ' Surname: '+str(surname_comp_list), \\\n ' Geocode: '+str(geocode_comp_list), \\\n ' Locality: '+str(locality_comp_list), \\\n ' Date1: '+str(date1_comp_list), \\\n ' Date2: '+str(date2_comp_list)]\n log_message(msg,'v2')\n\n name_comp = ''\n givenname_comp = ''\n surname_comp = ''\n geocode_comp = ''\n locality_comp = ''\n date1_comp = ''\n date2_comp = ''\n\n # Now clean and then concatenate component lists into strings - - - - - - - -\n #\n if (name_comp_list != []): # Name component\n name_comp = name_comp_list[0] # Start with first field in list\n\n for f in name_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['name'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['name'] == 1):\n sep = check_field_spill(name_comp, f)\n\n name_comp = name_comp+sep+f # 
Append separator and field\n\n if (givenname_comp_list != []): # Givenname component - - - - - - - - - - -\n givenname_comp = givenname_comp_list[0] # Start with first field in list\n\n for f in givenname_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['givenname'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['givenname'] == 1):\n sep = check_field_spill(givenname_comp, f)\n\n givenname_comp = givenname_comp+sep+f # Append separator and field\n\n if (surname_comp_list != []): # Surname component - - - - - - - - - - - - -\n surname_comp = surname_comp_list[0] # Start with first field in list\n\n for f in surname_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['surname'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['surname'] == 1):\n sep = check_field_spill(surname_comp, f)\n\n surname_comp = surname_comp+sep+f # Append separator and field\n\n if (geocode_comp_list != []): # Geocode component - - - - - - - - - - - - -\n geocode_comp = geocode_comp_list[0] # Start with first field in list\n\n for f in geocode_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['geocode'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['geocode'] == 1):\n sep = check_field_spill(geocode_comp, f)\n\n geocode_comp = geocode_comp+sep+f # Append separator and field\n\n if (locality_comp_list != []): # Locality component - - - - - - - - - - - -\n locality_comp = locality_comp_list[0] # Start with first field in list\n\n for f in locality_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['locality'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['locality'] == 1):\n sep = check_field_spill(locality_comp, f)\n\n locality_comp = locality_comp+sep+f # Append separator and field\n\n if (date1_comp_list != []): # Date1 component - - - - - - - - - - - - - - -\n date1_comp = date1_comp_list[0] # Start with first field in list\n\n for f in date1_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['date1'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['date1'] == 1):\n if (date1_comp[-1] != ' ') and (f[0] != ' '):\n tmp_list0 = date1_comp.split()\n tmp_list1 = f.split()\n check_word = tmp_list0[-1]+tmp_list1[0]\n\n if (check_word in ['jan','feb','mar','apr','may','jun','jul','aug', \\\n 'sep','oct','nov','dec','january','february','march','april', \\\n 'may','june','july','august','september','october','november', \\\n 'december']):\n\n sep = '' # Set separator to no space\n msg = ' Correct date1 word spilling: 
\"'+date1_comp+'\",\"'+f+'\"'\n log_message(msg,'v1')\n\n date1_comp = date1_comp+sep+f # Append separator and field\n\n if (date2_comp_list != []): # Date2 component - - - - - - - - - - - - - - -\n date2_comp = date2_comp_list[0] # Start with first field in list\n\n for f in date2_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['date2'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['date2'] == 1):\n if (date2_comp[-1] != ' ') and (f[0] != ' '):\n tmp_list0 = date1_comp.split()\n tmp_list1 = f.split()\n check_word = tmp_list0[-1]+tmp_list1[0]\n\n if (check_word in ['jan','feb','mar','apr','may','jun','jul','aug', \\\n 'sep','oct','nov','dec','january','february','march','april', \\\n 'may','june','july','august','september','october','november', \\\n 'december']):\n\n sep = '' # Set separator to no space\n msg = ' Correct date1 word spilling: \"'+date1_comp+'\",\"'+f+'\"'\n log_message(msg,'v1')\n\n date2_comp = date2_comp+sep+f # Append separator and field\n\n # Check if name component is given or givenname and surname separately - - -\n #\n if (config.input_component['givenname'] != []) or \\\n (config.input_component['surname'] != []):\n name_comp = [givenname_comp, surname_comp]\n\n msg = [' Components:', \\\n ' Name: \"'+str(name_comp)+'\"', \\\n ' Geocode: \"'+geocode_comp+'\"', \\\n ' Locality: \"'+locality_comp+'\"', \\\n ' Date1: \"'+date1_comp+'\"', \\\n ' Date2: \"'+date2_comp+'\"']\n log_message(msg,'v1')\n\n return [name_comp, geocode_comp, locality_comp, date1_comp, date2_comp]", "def testSplitLine_two_splits():\n line = np.array([1, 2, 3, 3, 3, 4, 4, 4, 5])\n split_lines = splitLine(line)\n if np.all(np.concatenate(split_lines) == line):\n print(str(np.concatenate(split_lines)), \" == \", str(line))\n else:\n print(str(np.concatenate(split_lines)), \" != \", str(line))", "def test_line_split():\n for _x in range(100):\n delim = choice((\"=\", \"|\", \",\", \"$\", \".\", \"/\"))\n l_str = delim.join([random_str(5, 10) for x in range(30)])\n line = Line(l_str, random_str(10, 20), randint(1, 10000))\n # Split the string\n l_parts = line.split(delim)\n exp_parts = l_str.split(delim)\n assert len(l_parts) == len(exp_parts)\n for l_part, x_part in zip(l_parts, exp_parts):\n assert isinstance(l_part, Line)\n assert l_part == x_part\n assert l_part.file == line.file\n assert l_part.number == line.number", "def emptyline(self):", "def _raise_if_not_line(self, l: float):\n # todo: check, if line exists -> if not, causes crash (raise exception before!)\n pass", "def _process_text_line(self, line, columns, format, lower_case, num_line,\n fill_missing=0, filter_case=None,\n strict_separator=False):\n if not isinstance(line, list) and not isinstance(\n line, tuple) and not isinstance(line, numpy.ndarray):\n if format != \"tsv\":\n raise Exception(\"unable to process format \" + format)\n line = line.strip(\"\\r\\n \").replace(\"\\n\", \" \")\n line = DatabaseCore2._split_expr.split(line)\n\n if filter_case is not None:\n line = [filter_case(s) for s in line]\n\n try:\n if fill_missing > 0:\n m = max(columns.keys())\n if m >= len(line):\n line = copy.copy(line)\n add = 0\n while m >= len(line) and add < fill_missing:\n a, b = columns[len(line)]\n if b is int:\n line.append(\"0\")\n elif b is float:\n line.append(\"0.0\")\n elif b is decimal.Decimal:\n 
line.append(\"0\")\n elif b is str:\n line.append(\"\")\n else:\n line.append(\"\")\n add += 1\n\n res = {}\n for c, v in columns.items():\n if \"AUTOFILL\" in v:\n res[v[0]] = \"NULL\"\n elif \"AUTOINCREMENT\" in v:\n continue\n else:\n if c >= len(line):\n self.LOG(\n \"(a)line number \",\n num_line,\n \"*unable to process a line columns \",\n c,\n \"#\",\n line,\n \" columns \",\n columns)\n return None\n\n val = line[c]\n if len(v) > 2 and v[2].lower() not in [\n \"primarykey\", \"autofill\"]:\n val = v[2](val)\n\n try:\n if isinstance(v[1], tuple):\n val = v[1][0](val)\n elif v[1] is datetime.datetime:\n if isinstance(val, datetime.datetime):\n pass\n elif isinstance(val, str):\n val = datetime.datetime.parse(val)\n else:\n raise TypeError(\n \"unable to convert %s into datetime\" % str(\n type(val)))\n else:\n val = v[1](val)\n except ValueError: # as e :\n self.LOG(\n \"(b)line number \",\n num_line,\n \"**unable to process a line columns \",\n c,\n \"#\",\n v[0],\n \" type \",\n v[1],\n \" value \",\n repr(\n line[c]))\n return None\n\n if isinstance(val, str):\n val = val.replace(\"'\", \"''\")\n if lower_case:\n val = val.lower()\n res[v[0]] = val\n\n return res\n except Exception:\n self.LOG(\"(c)line number\", num_line,\n \"***unable to process a line columns:\", line)\n return None", "def _parse_line(self, line):\n fields = line.split('|', 4) # stop splitting after fourth | found\n line_info = {'raw_message': line}\n if len(fields) == 5:\n line_info.update(dict(zip(self._fieldnames, fields)))\n return line_info", "def isLineData(self, line):\n\n if line is None or line.strip().startswith('#'):\n return False, None, 0\n\n dataType = self.getDataType()\n\n if dataType == 'Y':\n # Y with 1 column\n try:\n yValue = float(line)\n\n return True, 'Y', 1\n except:\n pass\n\n # Y with comma 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split(',')\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n newYValues = []\n for yValue in yValueList:\n try:\n yValue = float(yValue)\n newYValues.append(yValue)\n except ValueError:\n pass\n\n return True, 'Y', len(newYValues)\n except:\n pass\n\n # Y with space 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split()\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n for yValue in yValueList:\n yValue = float(yValue)\n\n return True, 'Y', len(yValueList)\n except:\n pass\n elif dataType == 'XY':\n # XY with comma\n try:\n (xValue, yValue) = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with comma\n try:\n xValue, yValue, dummy = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with space\n try:\n (xValue, yValue) = line.split()\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n else:\n # Y with 1 column\n try:\n yValue = float(line)\n\n return True, 'Y', 1\n except:\n pass\n\n # Y with comma 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split(',')\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n numberValues = 0\n for yValue in yValueList:\n try:\n yValue = float(yValue)\n numberValues += 1\n except ValueError:\n pass\n\n return True, 'Y', numberValues\n except:\n pass\n\n # Y with space 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split()\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n for yValue in yValueList:\n yValue = float(yValue)\n\n return True, 'Y', len(yValueList)\n except:\n pass\n\n 
# XY with comma\n try:\n (xValue, yValue) = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with comma\n try:\n xValue, yValue, dummy = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with space\n try:\n (xValue, yValue) = line.split()\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n return False, None, 0", "def parse_line(self, line):\n raise NotImplementedError", "def is_line(self):\n return False", "def __input_data_ok(self, line=None):\n # valid pattern: 1407478022|www.facebook.com\n valid_pattern = re.compile(\"\\w{10}\\|\\w+\")\n if (line) and (re.match(valid_pattern, line)):\n return True\n else:\n return False", "def check_line(self, line: str, line_num: int) -> Lints:\n yield from super().check_line(line, line_num)\n\n if self.in_header and line != \"---\\n\":\n for m in self.TAG_QUOTED.finditer(line):\n yield LintError(\"header-tags-quoted\", line_num, m.start())", "def doomed_parser(line):\n raise exceptions.LineParseException('Error occurred')", "def check_line(self, line):\n return int(line) in self.__bus_dict", "def test_line(line):\r\n if not line.strip():\r\n return False # if the last line is blank\r\n if line.startswith(\"#\"):\r\n return False # comment line\r\n if line.startswith(\" #\"):\r\n return False # comment line\r\n return line", "def line_valid(line: str) -> bool:\n\n return line != ' ' and line != ''", "def is_line(self):\n return True", "def is_line(self):\n return True" ]
[ "0.68945944", "0.65980923", "0.64360386", "0.64280456", "0.640811", "0.6315471", "0.62812597", "0.6233146", "0.61786985", "0.617687", "0.6170383", "0.61697614", "0.6118626", "0.6044029", "0.6039419", "0.6007905", "0.60070646", "0.6002585", "0.5974007", "0.5965619", "0.5962297", "0.5952723", "0.5933941", "0.5921769", "0.5890514", "0.58857", "0.5883305", "0.5880509", "0.5875158", "0.5875158" ]
0.6924609
0
Unique drug list dict is correctly returned.
def test_get_unique_drug_list(self): dict1 = self.test_dict dict2 = get_unique_drug_list(self.test_sorted_tuple) self.assertEqual(dict1, dict2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unique_drugs(self):\n if self.results is not None:\n return tuple(self.results['drug'].unique())", "def _get_unique_genres(connection):\n print('---Getting unique genres---')\n genreDict = {}\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM shared_genres;\")\n res = cursor.fetchall()\n num_genres = 0\n for genre in res:\n if genre[1] not in genreDict:\n genreDict[genre[1]] = num_genres\n num_genres += 1\n return genreDict", "def getDrugData(self, moleculeChEMBLIdList):\n oD = {}\n chunkSize = 50\n try:\n for ii in range(0, len(moleculeChEMBLIdList), chunkSize):\n drug = new_client.drug # pylint: disable=no-member\n drug.set_format(\"json\")\n mDL = drug.filter(molecule_chembl_id__in=moleculeChEMBLIdList[ii : ii + chunkSize])\n if mDL:\n logger.info(\"mDL (%d)\", len(mDL))\n for mD in mDL:\n oD.setdefault(mD[\"molecule_chembl_id\"], []).append(mD)\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n return oD", "def unique(list_: List) -> List:\n return list(collections.OrderedDict.fromkeys(list_))", "def test_magicdictlist_dedupe():\n d1 = magicdictlist()\n\n d1['key1'].append('1 hello')\n d1['key1'].append('1 world')\n d1['key2'].append('2 hello')\n d1['key1'].append('1 world')\n\n d2 = d1.dedupe()\n assert len(d2) == 2\n assert len(d2['key1']) == 2\n assert len(d2['key2']) == 1\n assert set(d2['key1']) == set(['1 hello', '1 world'])\n assert d2['key2'] == ['2 hello']", "def __init__(self):\n self.d = collections.defaultdict(list)", "def _get_rekey_ddi_data(ddi_data):\n for enum, item in enumerate(ddi_data):\n ddi_data[enum] = dict((d['network'],\n dict(d, index=index))\n for (index, d) in enumerate(item))\n return ddi_data", "def get_dict(cleaned_docs):\n data = []\n for doc in cleaned_docs:\n data += doc\n return list(set(data))", "def _create_dictionary_of_ned_d(\n self):\n self.log.debug(\n 'starting the ``_create_dictionary_of_ned_d`` method')\n\n count = 0\n with open(self.pathToDataFile, 'rb') as csvFile:\n csvReader = csv.reader(\n csvFile, dialect='excel', delimiter=',', quotechar='\"')\n totalRows = sum(1 for row in csvReader)\n csvFile.close()\n totalCount = totalRows\n\n with open(self.pathToDataFile, 'rb') as csvFile:\n csvReader = csv.reader(\n csvFile, dialect='excel', delimiter=',', quotechar='\"')\n theseKeys = []\n dictList = []\n for row in csvReader:\n if len(theseKeys) == 0:\n totalRows -= 1\n if \"Exclusion Code\" in row and \"Hubble const.\" in row:\n for i in row:\n if i == \"redshift (z)\":\n theseKeys.append(\"redshift\")\n elif i == \"Hubble const.\":\n theseKeys.append(\"hubble_const\")\n elif i == \"G\":\n theseKeys.append(\"galaxy_index_id\")\n elif i == \"err\":\n theseKeys.append(\"dist_mod_err\")\n elif i == \"D (Mpc)\":\n theseKeys.append(\"dist_mpc\")\n elif i == \"Date (Yr. 
- 1980)\":\n theseKeys.append(\"ref_date\")\n elif i == \"REFCODE\":\n theseKeys.append(\"ref\")\n elif i == \"Exclusion Code\":\n theseKeys.append(\"dist_in_ned_flag\")\n elif i == \"Adopted LMC modulus\":\n theseKeys.append(\"lmc_mod\")\n elif i == \"m-M\":\n theseKeys.append(\"dist_mod\")\n elif i == \"Notes\":\n theseKeys.append(\"notes\")\n elif i == \"SN ID\":\n theseKeys.append(\"dist_derived_from_sn\")\n elif i == \"method\":\n theseKeys.append(\"dist_method\")\n elif i == \"Galaxy ID\":\n theseKeys.append(\"primary_ned_id\")\n elif i == \"D\":\n theseKeys.append(\"dist_index_id\")\n else:\n theseKeys.append(i)\n continue\n\n if len(theseKeys):\n count += 1\n if count > 1:\n # Cursor up one line and clear line\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n if count > totalCount:\n count = totalCount\n percent = (float(count) / float(totalCount)) * 100.\n print \"%(count)s / %(totalCount)s (%(percent)1.1f%%) rows added to memory\" % locals()\n rowDict = {}\n for t, r in zip(theseKeys, row):\n rowDict[t] = r\n if t == \"ref_date\":\n try:\n rowDict[t] = int(r) + 1980\n except:\n rowDict[t] = None\n\n if rowDict[\"dist_index_id\"] != \"999999\":\n dictList.append(rowDict)\n\n csvFile.close()\n\n self.log.debug(\n 'completed the ``_create_dictionary_of_ned_d`` method')\n return dictList", "def remove_duplicates(data):\n already_used_items = {}\n return_data = []\n\n for item in data:\n # Yes, I know that I can find used items in the return_data,\n # but HW requires this logic.\n if not already_used_items.get(item):\n return_data.append(item)\n already_used_items[item] = True\n\n return return_data", "def _uniq( list ) : \r\n \r\n d = {} \r\n for e in list : \r\n d[e] = 1 \r\n \r\n return d.keys()", "def _uniq(self, lst):\n h = {}\n for e in lst:\n h[e] = 1\n return sorted(h.keys())", "def removeDups(lst):\n\n return list(dict.fromkeys(lst) )", "def delete_duplicate(x):\n return list(dict.fromkeys(x))", "def dangling_pic_list(pic):\n if pic and not pic.person_set.count():\n ids.append(pic.key().id())", "def item_duplicate():\n return {'name':'chair',\n 'value':300}", "def duplicates(self, x):\n return list(dict.fromkeys(x))", "def remove_duplicates(input_list):\n return list(dict.fromkeys(input_list))", "def _remove_duplicates(input_list):\n return list(OrderedDict.fromkeys(input_list))", "def to_listing_dict(self) -> dict:\n data = super().to_listing_dict()\n return data", "def unique_rp(db):\n for rp in sorted(db['rp'].keys()):\n print(rp)", "def getSet(unique_name):", "def getSet(unique_name):", "def unique(self, key, lst=None):\n d = self.find(key, lst)\n vals = set(d.values())\n return sorted(list(vals))", "def get_sensor_dict():\n\n with open('last_seen.json') as json_file:\n stored_dict = json.load(json_file)\n\n new_list = []\n for dev in stored_dict['devices']:\n new_list.append(dev['id'])\n unique_list = list(set(new_list))\n\n return stored_dict, unique_list", "def __init__(self):\n self.d = defaultdict(list)", "def shuffled_data_reset(self):\n self.unique_data = {}\n for stock in self.stocks:\n self.unique_data[stock] = []\n for date in self.dates:\n self.unique_data[stock] += [date]", "def drug_names():\n results = set()\n if 'qry' in request.args and len(request.args['qry']) >= 3:\n look_for = f\"{request.args['qry'].lower()}%\"\n drug_list = FTA.find_by_name(look_for, False )\n results = set([f\"{d.PROPRIETARY_NAME} - {d.NONPROPRIETARY_NAME}\" for d in drug_list if d.ACTIVE])\n\n results = sorted(list(results))\n return jsonify(results)", "def dgen(self, d,k,v):\n\t\tif k not 
in d.keys():\n\t\t\td.setdefault(k,{})\n\t\t\td[k].update(v)\n\t\telif k in d.keys():\n\t\t\t# remove psscan colors if pslist already found something\n\t\t\tif \"color\" in d[k].keys():\n\t\t\t\tif d[k][\"color\"] == \"black\":\n\t\t\t\t\tif \"color\" in v.keys():\n\t\t\t\t\t\tif v[\"color\"] == \"blue\":\n\t\t\t\t\t\t\tdel v[\"color\"]\n\t\t\t\t\tif \"fillcolor\" in v.keys():\n\t\t\t\t\t\tif v[\"fillcolor\"] == \"cyan\":\n\t\t\t\t\t\t\tdel v[\"fillcolor\"]\n\t\t\td[k].update(v)", "def unique(self):\n seen = {}\n result = []\n for p in map(tuple, self):\n if p not in seen:\n seen[p] = True\n result.append(p)\n return Pairs(result)" ]
[ "0.6729187", "0.5912013", "0.57591486", "0.5738331", "0.5666848", "0.5649718", "0.5634769", "0.562001", "0.56132656", "0.5601305", "0.55995375", "0.55618244", "0.55507255", "0.5541687", "0.5520323", "0.5516712", "0.5491589", "0.54879695", "0.54779065", "0.54644716", "0.5451422", "0.5442167", "0.5442167", "0.542725", "0.5418897", "0.540187", "0.5390148", "0.538011", "0.5379323", "0.5368923" ]
0.68579596
0
Total cost of each drug is correct.
def test_get_total_cost_each_drug(self): list1 = self.test_total_cost_each_drug list2 = get_total_cost_each_drug(self.test_sorted_tuple, self.test_dict) self.assertEqual(list1, list2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cost(self) -> float:", "def tablecost(self):\n subtotal_getter = operator.attrgetter(\"subtotal\")\n\n cost = 0.0\n\n cost += sum(map(subtotal_getter, self.materials))\n cost += sum(map(subtotal_getter, self.processes))\n cost += sum(map(subtotal_getter, self.fasteners))\n cost += sum(map(subtotal_getter, self.toolings))\n\n return cost", "def get_total(self):\n total = 0.00\n\n for _drink in self.drinks:\n total = total + _drink.get_price()\n\n for _food in self.food:\n total = total + _food.get_price()\n\n return total", "def get_expected_cost(self):", "def patrimony_total(self):\n pass", "def calculate_total_cost(state):\n pass", "def calculate_total_cost(state):\r\n return state.cost()", "def get_total_cost(self):\n total_cost = sum([item.quantity * item.product.price for item in self.orderitem_set.all()])\n return total_cost - total_cost * (self.discount / Decimal('100'))", "def _calculate_costs(self):\n cost = 0\n cost += self._cost_route_fine()\n cost += self._cost_petrol()\n cost += self._cost_wage()\n cost += self._cost_refueling()\n cost += self._cost_caught_by_police()\n cost += self._cost_vehicle_malfunction()\n return cost", "def total_cost(self):\n return (self.food_amount + self.local_transport_amount + self.other_expenses +\n self.travel_amount + self.accomodation_amount)", "def total_cost(self):\n if self.goal:\n return self.goal + (self.community_contribution or 0)\n else:\n return 0", "def calculateCosts(self):\n self.costs = 0\n for house in self.houses:\n if not house.distance == 1000:\n self.costs += house.distance * 9\n for battery in self.batteries:\n self.costs += battery.costs\n return self.costs", "def totalValue(self):\n\n\t\tvalue = 0\n\t\tfor bottle in self.bottles:\n\t\t\tvalue += bottle.inflatedCost\n\n\t\treturn value", "def calculate_total(self):\n if self.total_price == 0:\n for discount in self.discounts:\n for item in self.items:\n item.add_discount(discount)\n\n for item in self.items:\n self.total_price += item.final_price()\n\n return self.total_price", "def total_cost(self):\r\n return sum(i.line_cost for i in self.orderitem_set.filter(status=self.status)) # pylint: disable=E1101\r", "def cost(self):\n\t\treturn self.g + self.h", "def cost(foods, foods_used):\n cost = 0.00\n for i, count in foods_used.items():\n cost += (foods[i]['serving_cost'] * count)\n return cost", "def unitcost(self):\n cost = self.tablecost\n\n for component, quantity in self.components.items():\n cost += component.unitcost * quantity\n\n return cost", "def total_management_cost(self):\n total = 0\n total += self.output_dict['insurance_usd']\n total += self.output_dict['construction_permitting_usd']\n total += self.output_dict['bonding_usd']\n total += self.output_dict['project_management_usd']\n total += self.output_dict['markup_contingency_usd']\n total += self.output_dict['engineering_usd']\n total += self.output_dict['site_facility_usd']\n return total", "def get_total(self):\n\n self.base_price = self.get_base_price()\n\n if self.species == \"christmas melon\":\n self.base_price = self.base_price * 1.5\n\n total = (1 + self.tax) * self.qty * self.base_price\n return total", "def calculScore(self):\n for cell in self.notComputeRouter:\n if(cell.isCovered==True):\n self.score += 1000\n self.score += self.budget", "def total(self):\n gd_total = self._grand_total()\n counts = self._get_as_dict_count()\n for rule in self.rules:\n gd_total += rule(counts)\n return gd_total", "def get_total(self):\n\n base_price = self.get_base_price()\n if self.species == \"christmas 
melon\":\n base_price = base_price * 1.5\n\n total = ((1 + self.tax) * self.qty * base_price)\n\n return total", "def cost(self):\n return self._cost", "def cost(self):\n return self._cost", "def calculate_cost(self):\n booking_days, booking_hours = self.calculate_daily_hourly_billable_counts()\n day_cost = booking_days * Decimal(self.vehicle.type.daily_rate)\n hour_cost = booking_hours * Decimal(self.vehicle.type.hourly_rate)\n if hour_cost > self.vehicle.type.daily_rate:\n hour_cost = self.vehicle.type.daily_rate\n return float(day_cost + hour_cost)", "def total_donations(self):\n return sum(self.donations)", "def total_donations(self):\n return sum(self.donations)", "def get_total(self):\n\n base_price = self.get_base_price()\n\n if self.species == \"Christmas\":\n base_price = base_price * 1.5\n\n total = (1 + self.tax) * self.qty * base_price\n\n return total", "def get_hcost(self):\n hvcost = self.get_hvcost()\n dcost = self.get_dcost()\n hcost = hvcost + dcost\n return hcost" ]
[ "0.7039642", "0.68268335", "0.6784941", "0.67635846", "0.6760325", "0.66375047", "0.66135275", "0.65899247", "0.65713847", "0.6541224", "0.6456955", "0.6456513", "0.6444531", "0.6374481", "0.6351378", "0.63261276", "0.6230316", "0.6212625", "0.6205208", "0.6195797", "0.6188748", "0.61811465", "0.61560684", "0.615034", "0.615034", "0.6142691", "0.6112799", "0.6112799", "0.6099505", "0.60935885" ]
0.68602717
1
Checks if a string is a permutation of a palindrome by populating a map and counting the occurrences of letters. O(N)
def is_palindrome_permutation(string): letter_to_count = dict() for letter in string: letter_to_count[letter] = letter_to_count.get(letter, 0) + 1 residual = 0 for count in letter_to_count.values(): residual += count % 2 # there are can be a single letter with an odd character count when the palindrome is of odd length return residual <= 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def palindrom_permutation(string: str):\n string = re.sub(r'\\W+', '', string.lower())\n\n chars = dict()\n for c in string:\n chars[c] = chars[c] + 1 if c in chars else 1\n\n almost_not_okey = False\n for val in chars.values():\n if val % 2 == 1:\n if not almost_not_okey:\n almost_not_okey = True\n else:\n return False\n\n if almost_not_okey:\n return len(string) % 2 == 1\n return True", "def palindrome_permutation(s):\n char_dict = {}\n for i in s:\n if i in char_dict:\n char_dict[i] += 1\n else:\n char_dict[i] = 1\n numOdd = 0\n for key in char_dict:\n if key != ' ':\n if char_dict[key] % 2 == 1:\n numOdd += 1\n if numOdd < 2:\n print_permutation(char_dict)\n return True\n else:\n return False", "def is_palindrome_permutation(input_string):\n input_string = input_string.lower()\n input_string = ''.join(input_string.split())\n\n number_chars = {}\n number_even_chars = 0\n\n for char in input_string:\n if char in number_chars:\n number_chars[char] += 1\n else:\n number_chars[char] = 1\n\n for char in number_chars:\n if number_chars[char] % 2 != 0:\n number_even_chars += 1\n if number_even_chars >= 2:\n return False\n\n return True", "def palindromePermutation(s):\n char_count = {}\n for character in s:\n if character == ' ': continue # skip the spaces.\n char_count[character] = char_count.get(character, 0) + 1\n\n odd = False\n for key in char_count:\n if char_count[key] % 2 != 0:\n if odd:\n return False\n odd = True\n\n return True \n\n # Time Complexity: O(n)\n # Space Complexity: O(m), where m is the number of unique characters", "def check_permutation_of(string1,string2):\n if len(string1) != len(string2): #O(1)\n return False\n return collections.Counter(string1) == collections.Counter(string2) #O(n+n) to make the dictionaries\n #O(n+n) to compare equality?\n #so O(4n) == O(n).", "def permutation(string):\n i = 0\n j = len(string) - 1\n while i < j:\n if string[i] != string[j]:\n return False\n i += 1\n j -= 1\n return True", "def has_palindrome_permutation(given_string):\n\n unpaired_characters = set()\n\n for char in given_string:\n if char in unpaired_characters:\n unpaired_characters.remove(char)\n else:\n unpaired_characters.add(char) \n\n return len(unpaired_characters) <= 1", "def checkPermutation(s, t):\n\n # Count each unique letter in both strings and compare the two dicts.\n s_count = {}\n t_count = {}\n for character in s:\n s_count[character] = s_count.get(character, 0) + 1\n\n for character in t:\n t_count[character] = t_count.get(character, 0) + 1\n\n return s_count == t_count\n\n # Time Complexity: O(n)\n # Space Complexity: O(n)", "def is_permutation_palindrome(str):\n for s in permutations(str): # loop through all permutations of str\n if is_palindrome(s):\n return True # successfully find a palindrome permutation\n return False # reach this, then no possible permutation is palindrome", "def palindrome_permutation(w):\n w = w.strip().replace(' ', '')\n chars = {}\n for c in w:\n try:\n chars[c] += 1\n except KeyError:\n chars[c] = 1\n\n if len(w) % 2 == 0:\n #Check if there is an even number\n #of every character in w.\n return all(x % 2 == 0 for x in chars.values()) \n else:\n #Check if there is an even number\n #of every character in w,\n #except for exactly one character.\n found_odd = False\n for c in chars:\n if chars[c] % 1 == 0:\n if not found_odd:\n found_odd = True\n else:\n return False\n \n if found_odd:\n return True\n else:\n return False", "def sherlockAndAnagrams(s):\n # A Dict of palindromes and their counts.\n palindrome_counts = {}\n\n # Get 
all substrings of length len(s)/c\n for substring_length in range(len(s) - 1):\n for substring_starting_index in range(len(s) - substring_length):\n substring_end_index = substring_starting_index + substring_length + 1\n substring = s[substring_starting_index:substring_end_index]\n # TODO: Sorting is an inefficient way to \"hash\" by palindrome.\n # A letter count dict would be more efficient (in the initial grouping).\n substring_arr = list(substring)\n substring_arr.sort()\n sorted_substring = \"\".join(substring_arr)\n\n if palindrome_counts.get(sorted_substring):\n palindrome_counts[sorted_substring] += 1\n else:\n palindrome_counts[sorted_substring] = 1\n\n return sum([_two_of_m(val) for val in palindrome_counts.values()])", "def countPalindromicSubsequences(self, s: str) -> int:\n MOD = 10 ** 9 + 7\n \n def dp(i, j) -> (int, set):\n distinct = set()\n if i > j:\n return (0, distinct)\n if i == j:\n distinct.add(s[i])\n return (1, distinct)\n ret = 0\n for c in 'abcd':\n l = s.find(c, i, j)\n if l < 0:\n continue\n r = s.rfind(c, i, j)\n sub_ret, sub_set = dp(l, r)\n print(sub_ret, sub_set)\n # print(f'{c}-{sub_set}-{c}')\n ret += sub_ret + 1\n ret %= MOD\n distinct.union(sub_set)\n distinct.add(c)\n\n return ret, distinct\n return dp(0, len(s))[0]", "def checkPermutation(string1, string2):\n string1_content = {}\n # Hash the first string\n for i in string1:\n if string1_content.get(i) is None:\n string1_content[i] = 1\n else:\n string1_content[i] += 1\n\n # For each character in the section string, search for it\n for i in string2:\n if string1_content.get(i) is None:\n return False\n string1_content[i] -= 1\n\n # Make sure every character in the first string had a matching character in the second string\n for key, value in string1_content.items():\n if value != 0:\n return False\n return True", "def is_anagram_of_palindrome(word):\n\n counts = {}\n num_of_odd_occurences = 0\n\n for char in word:\n counts[char] = counts.get(char, 0) + 1\n for val in counts.values():\n if val % 2 != 0:\n num_of_odd_occurences += 1\n\n return num_of_odd_occurences <= 1", "def is_permutation(str1, str2):\n\n chars = dict()\n\n def check_chars(ch, can_add, word_index):\n \"\"\"\n\n :param ch: the character we're looking for\n :param can_add: boolean which states if we can add more items to the dict\n :param word_index: int to identify the word\n :return: void\n \"\"\"\n if ch not in chars and can_add:\n chars[ch] = [False, word_index]\n else:\n chars[ch] = [True, word_index]\n\n n1 = len(str1)\n n2 = len(str2)\n for i in range(0, max(n1, n2)):\n if i < n1:\n check_chars(str1[i], i < n1, 1)\n if i < n2:\n check_chars(str2[i], i < n2, 2)\n\n word = None\n for ch in chars:\n if not chars[ch][0]:\n if word is None:\n word = chars[ch][1]\n elif word is not chars[ch][1]:\n return False\n return True", "def is_palindrome(text):\n\n # Property of a palindrome:\n # There be a maximum of only one letter that sums to an odd number\n \n char_count = {}\n # edge cases\n # Consider empty text as palindrome\n \n for char in text:\n if char in char_count:\n char_count[char] += 1\n else:\n char_count[char] = 1\n \n odd_count = 0\n for count in char_count.values():\n if count % 2 == 1:\n odd_count += 1\n if odd_count > 1:\n return False\n \n return True", "def check_palindrome(word):\r\n char_count = {} #char count hash\r\n for char in word:\r\n if char in char_count:\r\n char_count[char] += 1\r\n else:\r\n char_count[char] = 1\r\n odd_count = 0 #counting number of odd nos encountered\r\n for count in 
char_count.values():\r\n if count % 2 != 0:\r\n odd_count += 1\r\n len_word = len(word)\r\n if len_word % 2 == 0:\r\n if odd_count >= 1:\r\n return False\r\n else:\r\n if odd_count > 1:\r\n return False\r\n return True", "def build_permutation_dictionary(input_string):\n string_contents = {}\n\n for char in input_string:\n if char not in string_contents:\n string_contents[char] = 0\n else:\n string_contents[char] += 1\n\n return string_contents", "def is_perm(str1, str2):\n\n if len(str1) != len(str2):\n return False\n\n char_ct = defaultdict(int)\n\n for char in str1:\n char_ct[char] += 1\n\n for char in str2:\n char_ct[char] -= 1\n\n if char_ct[char] < 0:\n return False\n\n return True", "def isPalindromes(s):\n\n def toChar(s):\n s= s.lower()\n letters=''\n for c in s:\n if c in \"abcdefgh\":\n letters= letters+c\n return letters\n\n def isPal(s):\n if len(s) <=1:\n return True\n else:\n return s[0]==s[-1] and isPal(s[1:-1])\n return isPal(toChar(s))", "def is_permutation(a, b):\n a, b = str(a), str(b)\n return(len(a) == len(b) and Counter(a) == Counter(b))", "def longestPalindrome(self, s: str) -> int:\n # approach #1 -- using hashset\n # approach 2 -- using hashmap\n hashmap = defaultdict(int)\n odd = 0\n out = 0\n for char in s:\n hashmap[char] += 1\n\n for key, val in hashmap.items():\n if val % 2 == 1:\n odd = 1\n out += (val -1)\n else:\n out += val\n return out +odd", "def permutation_strings(input, input_two):\n if len(input) != len(input_two):\n return False\n else:\n return sorted(input) == sorted(input_two)", "def check_pal(s):\n counts = df(int)\n len_without_spaces = 0\n # Count all nonspaces\n for c in s:\n if c != ' ':\n counts[c.lower()] += 1\n len_without_spaces += 1\n # Now find out how many chars occur an odd number of times\n odd_chars = 0\n for c in counts:\n if counts[c] % 2 != 0:\n odd_chars += 1\n # If string length is even there must be no odd counts\n if len_without_spaces % 2 == 0 and odd_chars == 0:\n return True\n # If string is odd there must be exactly one odd count\n if len_without_spaces % 2 != 0 and odd_chars == 1:\n return True\n # Else, it's not a palindrome\n return False", "def substrCount(n, s):\r\n lst = []\r\n character = s[0]\r\n count = 1\r\n result = 0\r\n for i in range(1, n):\r\n if s[i] == character:\r\n count += 1\r\n else:\r\n lst.append((character, count))\r\n character = s[i]\r\n count = 1\r\n lst.append((character, count))\r\n\r\n for tpl in lst:\r\n \"\"\"calculate all possible palindromes created from same characters that are close to each other\r\n E.g: aaa => 6 possibles (3*4//2 = 6)\r\n \"\"\"\r\n result += tpl[1] * (tpl[1] + 1) // 2\r\n\r\n for i in range(1, len(lst) - 1):\r\n if lst[i - 1][0] == lst[i + 1][0] and lst[i][1] == 1:\r\n \"\"\"\r\n check palindromes created from 3 tuples with a different character in between\r\n \"\"\"\r\n result += min(lst[i - 1][1], lst[i + 1][1])\r\n\r\n return result", "def is_anagram_of_palindrome(word):\n # palindrome has either exactly 2 of each letter in the word\n # or two of each letter revolving around one in the middle\n # An anagram rescrambles the letters\n chars = []\n\n # loop over the word\n # append chars to the list\n # if we see the char in list again, remove it.\n # if there is only one char or no chars in list\n # return True\n # else, return false\n\n for char in word:\n if char in chars:\n chars.remove(char)\n else:\n chars.append(char)\n if len(chars) >= 2:\n return False\n else:\n return True", "def sherlockAndAnagrams(s):\n\n dic = {}\n\n count = 0\n for i in 
range(len(s)):\n for j in range(i+1, len(s)+1):\n substrings = sorted(list(s[i:j]))\n joined_ss = ''.join(substrings)\n if joined_ss != '':\n if joined_ss in dic:\n count += dic[joined_ss]\n dic[joined_ss] += 1\n else:\n dic[joined_ss] = 1 \n print(dic)\n return count", "def question1a(s,t):\n\n anagrams = permutations(t, len(t))\n for anagram in anagrams:\n if anagram:\n if ''.join(anagram) in s:\n return True\n return False", "def are_anagrams(str_1, str_2):\r\n if len(str_1) != len(str_2):\r\n return False\r\n letters_nb_1 = [0] * 256\r\n for char in str_1:\r\n letters_nb_1[ord(char.lower())] += 1\r\n\r\n for char in str_2:\r\n char_ord = ord(char.lower())\r\n if letters_nb_1[char_ord] > 0:\r\n letters_nb_1[char_ord] -= 1\r\n else:\r\n return False\r\n return letters_nb_1 == [0] * 256", "def check_permutation2(u, v):\n u_chars = {}\n for c in u:\n try:\n u_chars[c] += 1\n except KeyError:\n u_chars[c] = 1\n\n v_chars = {}\n for d in v:\n try:\n v_chars[d] += 1\n except KeyError:\n v_chars[d] = 1\n\n if sum(u_chars.values()) != sum(v_chars.values()):\n #u and v are not of the same length.\n return False\n\n for c in u:\n c_count_in_u = u_chars[c]\n c_count_in_v = v_chars.get(c, 0)\n if c_count_in_u != c_count_in_v:\n return False\n\n return True" ]
[ "0.79791397", "0.7845905", "0.7619931", "0.7566796", "0.72823507", "0.7219113", "0.72079515", "0.71518576", "0.7108441", "0.6992611", "0.69697845", "0.6915593", "0.6882766", "0.6821809", "0.6709572", "0.6699704", "0.65028495", "0.6462751", "0.645578", "0.64369947", "0.6321017", "0.63135034", "0.6298444", "0.6291888", "0.6255911", "0.6252867", "0.6248278", "0.622805", "0.6211412", "0.6194518" ]
0.81786555
0
Compute the cycle consistency loss. L_cyc = lamA * [Expectation of L1_norm(F(G(A)) - A)] + lamB * [Expectation of L1_norm(G(F(B)) - B)]
def __cycle_consistency_loss(self, reconstructedA, reconstructedB): loss = self.opt.lamA * tf.reduce_mean(tf.abs(reconstructedA - self.realA)) + \ self.opt.lamB * tf.reduce_mean(tf.abs(reconstructedB - self.realB)) return loss
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cycle_consistency_loss(self, ra, rb, fa, fb):\n with tf.device(\"/gpu:0\"):\n backward_loss = tf.reduce_mean(tf.abs(self.Ga2b(fa) - rb))\n with tf.device(\"/gpu:1\"):\n forward_loss = tf.reduce_mean(tf.abs(self.Gb2a(fb) - ra))\n loss = self.lambda1 * forward_loss + self.lambda2 * backward_loss\n return loss", "def directed_cycle_score(A):\n\n # Implement your cycle score given Problem 4 Part 2\n temp_matrix = np.zeros(A.shape)\n alpha = 0.05\n k = 0\n summation_term = 999999\n num_terms = A.shape[0]\n # while change < 0.05:\n for i in range(num_terms):\n summation_term = (1 / np.math.factorial(k)) * expm(A)\n temp_matrix += summation_term\n\n cycle_score = np.trace(temp_matrix) - (A.shape[0] * num_terms)\n return cycle_score", "def cycle_consistency_loss(self, reconstructed_x, reconstructed_y, x, y, loss_mode=2, ):\n if loss_mode == 1:\n forward_loss = tf.reduce_mean(tf.abs(reconstructed_x - x))\n backward_loss = tf.reduce_mean(tf.abs(reconstructed_y - y))\n elif loss_mode == 2:\n forward_loss = tf.reduce_mean(tf.square(reconstructed_x - x))\n backward_loss = tf.reduce_mean(tf.square(reconstructed_y - y))\n elif loss_mode == 3:\n forward_loss = tf.reduce_mean(tf.losses.huber_loss(x, reconstructed_x, weights=5, delta=0.2))\n backward_loss = tf.reduce_mean(tf.losses.huber_loss(y, reconstructed_y, weights=5, delta=0.2))\n elif loss_mode == 0:\n print 'cycle softmax'\n forward_loss_map = tf.square(reconstructed_x - x)\n backward_loss_map = tf.square(reconstructed_y - y)\n batchsize = forward_loss_map.get_shape()[0].value\n cycle_softmax_coef = 0.75\n\n reshaped_forward_loss_map = tf.reshape(forward_loss_map, shape=[batchsize, -1])\n forward_softmax_weight = tf.nn.softmax(reshaped_forward_loss_map*cycle_softmax_coef, dim=1)\n forward_loss = tf.reduce_sum(forward_softmax_weight * reshaped_forward_loss_map)\n\n reshaped_backward_loss_map = tf.reshape(backward_loss_map, shape=[batchsize, -1])\n backward_softmax_weight = tf.nn.softmax(reshaped_backward_loss_map*cycle_softmax_coef, dim=1)\n backward_loss = tf.reduce_sum(backward_softmax_weight * reshaped_backward_loss_map)\n\n else:\n print 'Unknown cycle loss mode'\n exit(0)\n loss = self.lambda1 * forward_loss + self.lambda2 * backward_loss\n return self.lambda1 * forward_loss, self.lambda2 * backward_loss, loss", "def compute_losses(self):\n cycle_consistency_loss_a = \\\n self._lambda_a * losses.cycle_consistency_loss(\n real_images=self.input_a, generated_images=self.cycle_images_a,\n )\n cycle_consistency_loss_b = \\\n self._lambda_b * losses.cycle_consistency_loss(\n real_images=self.input_b, generated_images=self.cycle_images_b,\n )\n\n lsgan_loss_a = losses.lsgan_loss_generator(self.prob_fake_a_is_real)\n lsgan_loss_b = losses.lsgan_loss_generator(self.prob_fake_b_is_real)\n\n g_loss_A = \\\n cycle_consistency_loss_a + cycle_consistency_loss_b + lsgan_loss_b\n g_loss_B = \\\n cycle_consistency_loss_b + cycle_consistency_loss_a + lsgan_loss_a\n\n d_loss_A = losses.lsgan_loss_discriminator(\n prob_real_is_real=self.prob_real_a_is_real,\n prob_fake_is_real=self.prob_fake_pool_a_is_real,\n )\n d_loss_B = losses.lsgan_loss_discriminator(\n prob_real_is_real=self.prob_real_b_is_real,\n prob_fake_is_real=self.prob_fake_pool_b_is_real,\n )\n\n optimizer = tf.train.AdamOptimizer(self.learning_rate, beta1=0.5)\n\n self.model_vars = tf.trainable_variables()\n\n d_A_vars = [var for var in self.model_vars if 'd_A' in var.name]\n g_A_vars = [var for var in self.model_vars if 'g_A' in var.name]\n d_B_vars = [var for var in self.model_vars if 'd_B' 
in var.name]\n g_B_vars = [var for var in self.model_vars if 'g_B' in var.name]\n\n self.d_A_trainer = optimizer.minimize(d_loss_A, var_list=d_A_vars)\n self.d_B_trainer = optimizer.minimize(d_loss_B, var_list=d_B_vars)\n self.g_A_trainer = optimizer.minimize(g_loss_A, var_list=g_A_vars)\n self.g_B_trainer = optimizer.minimize(g_loss_B, var_list=g_B_vars)\n\n for var in self.model_vars:\n print(var.name)\n\n # Summary variables for tensorboard\n self.g_A_loss_summ = tf.summary.scalar(\"g_A_loss\", g_loss_A)\n self.g_B_loss_summ = tf.summary.scalar(\"g_B_loss\", g_loss_B)\n self.d_A_loss_summ = tf.summary.scalar(\"d_A_loss\", d_loss_A)\n self.d_B_loss_summ = tf.summary.scalar(\"d_B_loss\", d_loss_B)", "def test_cfu_cycles(self):\n # Input: (function, in0, in1, cmd_valid, rsp_ready)\n # Output: (result, rsp_valid, cmd_ready)\n X = None\n DATA = [\n # Nothing\n ((0, 0, 0, 0, 0), (X, 0, 1)),\n # Same cycle instruction, CPU not ready\n ((0, 1, 2, 1, 0), (3, 1, 1)),\n ((0, 0, 0, 0, 1), (3, 1, 0)),\n ((0, 0, 0, 0, 0), (X, 0, 1)),\n # Multi-cycle instruction, CPU ready\n ((3, 3, 0, 1, 1), (X, 0, 1)),\n ((0, 0, 0, 0, 1), (X, 0, 0)),\n ((0, 0, 0, 0, 1), (X, 0, 0)),\n ((0, 0, 0, 0, 1), (6, 1, 0)),\n # Same cycle instruction, CPU ready\n ((0, 5, 3, 1, 1), (8, 1, 1)),\n # Multi-cycle instruction, CPU not ready\n ((3, 2, 0, 1, 0), (X, 0, 1)),\n ((0, 0, 0, 0, 0), (X, 0, 0)),\n ((0, 0, 0, 0, 0), (2, 1, 0)),\n ((0, 0, 0, 0, 1), (2, 1, 0)),\n # Multi-cycle instruction, but always ready next cycle\n ((4, 3, 5, 1, 1), (X, 0, 1)),\n ((0, 0, 0, 0, 1), (8, 1, 0)),\n # CPU not ready\n ((4, 3, 4, 1, 0), (X, 0, 1)),\n ((0, 0, 0, 0, 0), (X, 1, 0)),\n ((0, 0, 0, 0, 0), (X, 1, 0)),\n ((0, 0, 0, 0, 1), (7, 1, 0)),\n # Fallback instruction - same cycle, CPU ready\n ((7, 0, 0, 1, 1), (X, 1, 1)),\n ]\n\n def process():\n for n, (inputs, expected_outputs) in enumerate(DATA):\n func, i0, i1, cmd_valid, rsp_ready = inputs\n exp_result, exp_rsp_valid, exp_cmd_ready = expected_outputs\n yield self.dut.cmd_function_id.eq(func)\n yield self.dut.cmd_in0.eq(i0)\n yield self.dut.cmd_in1.eq(i1)\n yield self.dut.cmd_valid.eq(cmd_valid)\n yield self.dut.rsp_ready.eq(rsp_ready)\n yield Delay(0.1)\n if exp_result is not None:\n self.assertEqual((yield self.dut.rsp_out), exp_result)\n if exp_rsp_valid is not None:\n self.assertEqual((yield self.dut.rsp_valid), exp_rsp_valid)\n # We don't currently support returning non-OK responses, so\n # if our response is valid, it must be OK.\n if exp_rsp_valid:\n self.assertTrue((yield self.dut.rsp_ok))\n if exp_cmd_ready is not None:\n self.assertEqual((yield self.dut.cmd_ready), exp_cmd_ready)\n yield\n self.run_sim(process, False)", "def test_lcl_convergence():\n with pytest.raises(RuntimeError):\n lcl(1000. * units.mbar, 30. * units.degC, 20. 
* units.degC, max_iters=2)", "def cl_alm2d(alm1=None, alm2=None, lmax=100):\n if alm2 is None:\n alm2 = alm1\n cl = np.zeros(lmax+1)\n ls = np.arange(lmax+1)\n for l in ls:\n ms = np.arange(-l,l+1)\n \n cl[l] += ((alm1[l][ms]*np.conjugate(alm2[l][ms])).real).sum()/(2.*l+1.)\n return cl", "def lcs_dp(A, B):\n m = len(A)\n n = len(B) \n # array for storing the intermediate calculations \n temp_arr = [[None]*(n+1) for ]", "def cLCG(G):\n \n gens = []\n \n for g in G:\n gens.append(LCG(*g))\n \n m0 = G[0][3]-1\n \n while True:\n yield sum([(-1**j)*next(g) for j,g in enumerate(gens)]) % m0", "def test_lcl_convergence_issue():\n pressure = np.array([990, 973, 931, 925, 905]) * units.hPa\n temperature = np.array([14.4, 14.2, 13, 12.6, 11.4]) * units.degC\n dewpoint = np.array([14.4, 11.7, 8.2, 7.8, 7.6]) * units.degC\n lcl_pressure, _ = lcl(pressure[0], temperature[0], dewpoint[0])\n assert_almost_equal(lcl_pressure, 990 * units.hPa, 0)", "def lam(freq):\n return C / freq", "def test_cca_speed(self):\n shape = (64, 64)\n H1 = Variable(torch.randn(shape[0], shape[1], dtype=torch.double), requires_grad=True)\n H2 = Variable(torch.randn(shape[0], shape[1], dtype=torch.double), requires_grad=True)\n reg = 0.1\n N = 100\n\n fwd_func = CorrelationLoss.forward\n start = time()\n for _ in range(N):\n corr = fwd_func(None, H1, H2, reg, False, None) # using autograd\n corr.backward()\n print(\"autograd time taken\", time() - start)\n\n start = time()\n for _ in range(N):\n corr = CorrLoss(H1, H2, reg, False, None) # using my forward & backward\n corr.backward()\n print(\"my grad time taken\", time() - start)", "def tacsim_combined_in_C(G1, G2=None, node_attribute='weight', edge_attribute='weight', lamb=0.5, norm=True):\n # X: node similarity; Y: edge similarity\n X, Y = tacsim_in_C(G1, G2, node_attribute, edge_attribute)\n\n As, At = node_edge_adjacency(G1)\n if G2 is None:\n Bs, Bt = As, At\n else:\n Bs, Bt = node_edge_adjacency(G2)\n\n Z = Y + lamb * np.dot(np.dot(As.T, X), Bs) + (1 - lamb) * np.dot(np.dot(At.T, X), Bt)\n\n if norm:\n return normalized(Z)\n else:\n return Z", "def test_allow_effect_during_refractory(self):\n np.random.seed(6564)\n f = 0.5\n self.syn_dense.W = np.random.randn(self.M, self.N)\n self.syn_dense.f_nmda = f\n self.syn_dense.change_during_ref = True\n\n self.T.active_state = False\n\n sim = simulation.Simulation(self.G, self.T, self.syn_dense, dt=self.dt)\n sim.run(self.t_max)\n\n self.assertGreater(np.linalg.norm(self.T.i_ampa), 0.1)\n self.assertGreater(np.linalg.norm(self.T.i_nmda), 0.1)", "def test_find_cycles_multiple_cycles(self):\n self._build_sample_graph()\n # Adding cycle a -> d -> a\n self.skill_graph.add_prerequisite(self.sa.id, self.sd.id)\n # Adding cycle g -> h -> g\n sg = self.skill_graph.add(Skill.build('g', ''))\n sh = self.skill_graph.add(Skill.build('h', ''))\n self.skill_graph.add_prerequisite(sg.id, sh.id)\n self.skill_graph.add_prerequisite(sh.id, sg.id)\n\n expected = [[self.sa.id, self.sd.id], [sg.id, sh.id]]\n skill_map = SkillMap.load(self.course)\n successors = skill_map.build_successors()\n result = SkillMapMetrics(skill_map).simple_cycles()\n self.assertEqual(len(result), len(expected))\n for cycle in result:\n self.assertIn(sorted(cycle), expected)", "def cyclic_merit_lag(x,*args):\n CS = args[0]\n print \"rindex\",CS.rindex\n ht = get_ht(x,CS.rindex)\n hf = time2freq(ht)\n CS.hf = hf\n CS.ht = ht\n cs_model,csplus,csminus,phases = make_model_cs(hf,CS.s0,CS.bw,CS.ref_freq)\n merit = 2*(np.abs(cs_model[:,1:] - CS.cs[:,1:])**2).sum() #ignore zeroth 
harmonic (dc term)\n \n # the objval list keeps track of how the convergence is going\n CS.objval.append(merit)\n \n #gradient_lag\n diff = cs_model - CS.cs #model - data\n cc1 = cs2cc(diff * csminus)\n \n# original c code for reference:\n# for (ilag=0; ilag<cc1.nlag; ilag++) {\n# gradient->data[ilag] = 0.0 + I * 0.0;\n# int lag = (ilag<=cc1.nlag/2) ? ilag : ilag-cc1.nlag;\n# tau = (double)lag * (double)cs->nchan /\n# ( (double)cc1.nlag * cc1.bw*1.e6 );\n# for (ih=1; ih<cc1.nharm; ih++) {\n# phs = M_PI * tau * (double)ih * cc1.ref_freq;\n# phasor = cos(phs)+I*sin(phs);\n# fftwf_complex *ccval = get_cc(&cc1,ih,ip,ilag);\n# gradient->data[ilag] += 4.0 * (*ccval) * phasor\n# * conj(s0->data[ih]) / (float)cs->nchan;\n# }\n# }\n\n #we reuse phases and csminus, csplus from the make_model_cs call\n\n phasors = np.exp(1j*phases)\n cs0 = np.repeat(CS.s0[np.newaxis,:],CS.nlag,axis=0) #filter2cs\n grad = 4.0 * cc1 * phasors * np.conj(cs0) / CS.nchan\n grad = grad[:,1:].sum(1) # sum over all harmonics to get function of lag\n \n #conjugate(res)\n #calc positive shear\n #multiply\n #cs2cc\n cc2 = cs2cc(np.conj(diff) * csplus)\n grad2 = 4.0 * cc2 * np.conj(phasors) * cs0 / CS.nchan\n \n grad = grad + grad2[:,1:].sum(1)\n CS.grad = grad[:]\n CS.model = cs_model[:]\n\n if CS.iprint:\n print \"merit= %.7e grad= %.7e\" % (merit,(np.abs(grad)**2).sum())\n \n if CS.make_plots:\n if CS.niter % CS.plot_every == 0:\n CS.plotCurrentSolution()\n \n \n \n grad = get_params(grad, CS.rindex)\n CS.niter += 1\n \n return merit,grad", "def transition(self, closure):\n nlp = closure()\n\n for i in range(len(self.momentums)):\n self.momentums[i] = torch.randn_like(self.momentums[i])\n\n for m, p in zip(self.momentums, self.params):\n m -= (1/2) * (self.t/self.L) * p.grad\n\n for l in range(self.L):\n with torch.no_grad():\n for m, p in zip(self.momentums, self.params):\n p += (self.t/self.L) * m\n if (l+1) != self.L:\n nlp = closure()\n for m, p in zip(self.momentums, self.params):\n m -= (self.t/self.L) * p.grad\n\n return nlp", "def mclCycle(self, moveData, senseData):\n self.countCycles += 1\n # Insert code for these steps here:\n # 1. Set up a new sample list and a new weights list\n sample_lst = []\n weight_lst = []\n # 2. Loop over every particle, and for each particle:\n # 3. Call motionUpdate on the particle and moveData\n # 4. Compute the new weight for this particle by calling perceptionUpdate on the new (updated) location\n # 5. Add these to the new samples and new weights lists\n for p in self.samples:\n new_p = self.motionUpdate(p, moveData)\n new_w = self.perceptionUpdate(new_p, senseData)\n sample_lst.append(new_p)\n weight_lst.append(new_w)\n # 6. Normalize the weights (note I've provided a method for this)\n weight_lst = self.normalize(weight_lst)\n # 7. Use the weights to resample from the new sample list (see the method I've provided)\n sample_lst, weight_lst = self.resample(sample_lst, weight_lst)\n # 8. 
Store the new samples into self.samples, and the new weights to a local variable, newSampleWeights\n self.samples = sample_lst\n newSampleWeights = weight_lst\n self.printMCLStatus()\n CoM = self.findCenterOfMass(newSampleWeights)\n return CoM", "def test_simple_bind_gradient_graph_possible_with_cycle():\n data = mx.symbol.Variable('data')\n res = data + data + data + data + data + data + data + data\n res._simple_bind(ctx=mx.cpu(), data=(1,))", "def test_l1norm () :\n n = 10\n rfs = RewardFnSpace(list(range(n)))\n for i in range(10): \n b = rfs.bs[i]\n rfs.lp += b == 0\n rfs.lp.solve()\n rfs._setCoeffs()\n coeffs = np.array(rfs.coeffs)\n assert(np.linalg.norm(coeffs - np.ones(n)) < 1e-4)", "def vcycle(v, b):\n if (len(v) - 1) & (len(v) - 2) != 0:\n raise ValueError(\"Lenth of v must be 2**n + 1.\")\n\n for i in range(3):\n jacobi23(v, b)\n\n if len(v) <= 3:\n return\n\n r = b - Amul(v)\n r2 = 4. * restrict(r)\n e2 = np.zeros_like(r2)\n vcycle(e2, r2)\n v += prolong(e2)\n\n for i in range(3):\n jacobi23(v, b)", "def test_find_cycles_one_cycle(self):\n self._build_sample_graph()\n # Adding cycle a -> d -> a\n self.skill_graph.add_prerequisite(self.sa.id, self.sd.id)\n skill_map = SkillMap.load(self.course)\n self.assertEqual(6, len(skill_map.skills()))\n successors = skill_map.build_successors()\n self.assertEqual(\n sorted(SkillMapMetrics(skill_map).simple_cycles()[0]),\n [self.sa.id, self.sd.id])", "def dLdp(C1s,C0s,ks,bs,sigma=1):\n # return np.array(jit(jacfwd(L,argnums=1))(q,ps,C1s,C0s,ks,bs,sigma))\n \n # A = FIM(q,ps,C1s,C0s,ks,bs,sigma)\n \n # Construct A(q,ps)\n A = FIM(C1s,C0s,ks,bs,sigma)\n\n # Construct dAdp(q,ps)\n dAdp = jit(jacfwd(A,argnums=1))\n \n # Construct inv_A(q,ps)\n inv_A=lambda q,ps: jnp.linalg.inv(A(q,ps))\n \n # print(np.trace(-dAinv(inv_A,dAdp),axis1=0,axis2=1)-np.array(jit(jacfwd(L,argnums=1))(q,ps,C1s,C0s,ks,bs,sigma)))\n \n # Construct dLdP(q,ps)\n\n\n\n return lambda q,ps: -np.array(jnp.trace(dAinv(inv_A(q,ps),dAdp(q,ps)),axis1=0,axis2=1))", "def l2_reg_cost(cost, lambtha, weights, L, m):\n f = 0\n while (L):\n index = \"W{}\".format(L)\n weight = weights[index]\n f += np.linalg.norm(weight)\n L -= 1\n return cost + lambtha / (2 * m) * f", "def MCLDemo():\n doorsWorld = [(0.0, 32.0, \"wall\"), (32.0, 48.0, \"no wall\"),\n (48.0, 93.0, \"wall\"), (93.0, 109.0, \"no wall\"), (109.0, 121.0, \"wall\"),\n (121.0, 137.0, \"no wall\"), (137.0, 182.0, \"wall\"), (182.0, 185.0, \"no wall\")]\n opposites = {\"wall\": \"no wall\", \"no wall\": \"wall\"}\n\n monte = MonteCarloLocalizer(1000, 0, 185, doorsWorld)\n\n # quick simulation to test the code\n actualLoc = 1.0\n expectedLoc = 1.0\n twoNumsStr = \"{0:7.3f} {1:7.3f}\"\n print(\"------------ Initial location, expected and actual:\", twoNumsStr.format(expectedLoc, actualLoc))\n while expectedLoc < 180:\n distMoved = random.gauss(2.0, 0.25)\n print(\"------------ Movement, expected and actual:\", twoNumsStr.format(2.0, distMoved))\n\n expectedLoc += 2.0\n actualLoc = actualLoc + distMoved\n print(\"------------ New location, expected and actual:\", twoNumsStr.format(expectedLoc, actualLoc))\n\n actualSensor = monte.getMapValue(actualLoc)\n oppSensor = opposites[actualSensor]\n sensorData = random.choices([actualSensor, oppSensor, \"unknown\"], [96, 1, 4])\n reportedData = sensorData[0]\n print(\"------------ Sensor value, actual and reported:\", actualSensor, reportedData)\n\n result = monte.mclCycle(2.0, reportedData)\n monte.printPoint(expectedLoc, 'E')\n monte.printPoint(actualLoc, 'A')\n if result is not None:\n 
monte.printPoint(result, 'C')\n print(\"MCL Result:\", result)", "def causal_structure_learning(X, lambda1=0.001, loss_type='l2', max_iter=100, h_tol=1e-8, rho_max=1e+16, w_threshold=0.3):\r\n\r\n def _loss(W):\r\n \"\"\"Evaluate value and gradient of loss.\"\"\"\r\n M = X @ W\r\n if loss_type == 'l2':\r\n R = X - M\r\n loss = 0.5 / X.shape[0] * (R ** 2).sum()\r\n G_loss = - 1.0 / X.shape[0] * X.T @ R\r\n elif loss_type == 'logistic':\r\n loss = 1.0 / X.shape[0] * (np.logaddexp(0, M) - X * M).sum()\r\n G_loss = 1.0 / X.shape[0] * X.T @ (sigmoid(M) - X)\r\n elif loss_type == 'poisson':\r\n S = np.exp(M)\r\n loss = 1.0 / X.shape[0] * (S - X * M).sum()\r\n G_loss = 1.0 / X.shape[0] * X.T @ (S - X)\r\n else:\r\n raise ValueError('unknown loss type')\r\n return loss, G_loss\r\n\r\n def _h(W):\r\n \"\"\"Evaluate value and gradient of acyclicity constraint.\"\"\"\r\n # E = slin.expm(W * W)\r\n # h = np.trace(E) - d\r\n M = np.eye(d) + W * W / d\r\n E = np.linalg.matrix_power(M, d - 1)\r\n h = (E.T * M).sum() - d\r\n G_h = E.T * W * 2\r\n return h, G_h\r\n\r\n def _adj(w):\r\n \"\"\"Convert doubled variables ([2 d^2] array) back to original variables ([d, d] matrix).\"\"\"\r\n return (w[:d * d] - w[d * d:]).reshape([d, d])\r\n\r\n def _func(w):\r\n \"\"\"Evaluate value and gradient of augmented Lagrangian for doubled variables ([2 d^2] array).\"\"\"\r\n W = _adj(w)\r\n loss, G_loss = _loss(W)\r\n h, G_h = _h(W)\r\n obj = loss + 0.5 * rho * h * h + alpha * h + lambda1 * w.sum()\r\n G_smooth = G_loss + (rho * h + alpha) * G_h\r\n g_obj = np.concatenate((G_smooth + lambda1, - G_smooth + lambda1), axis=None)\r\n return obj, g_obj\r\n\r\n n, d = X.shape\r\n w_est, rho, alpha, h = np.zeros(2 * d * d), 1.0, 0.0, np.inf # double w_est into (w_pos, w_neg)\r\n bnds = [(0, 0) if i == j else (0, None) for _ in range(2) for i in range(d) for j in range(d)]\r\n for iter_j in range(max_iter):\r\n w_new, h_new = None, None\r\n print(iter_j)\r\n while rho < rho_max:\r\n sol = sopt.minimize(_func, w_est, method='L-BFGS-B', jac=True, bounds=bnds)\r\n w_new = sol.x\r\n h_new, _ = _h(_adj(w_new))\r\n if h_new > 0.25 * h:\r\n rho *= 10\r\n else:\r\n break\r\n w_est, h = w_new, h_new\r\n alpha += rho * h\r\n if h <= h_tol or rho >= rho_max:\r\n break\r\n W_est = _adj(w_est)\r\n # print(W_est)\r\n W_est[np.abs(W_est) < w_threshold] = 0\r\n # print(W_est)\r\n return W_est, h", "def test_lcsmodel_class():\n\n # Set the problem size.\n n = 1000\n p = 3\n\n # Define the test model\n TM = test.Model2(n,p)\n\n # Note: diff_A/diff_b do not require A/b as an input in this case,\n # but in the more general case they might.\n\n # Check the basic model calculations.\n theta = numpy.array((1., 0.1, 0.2, 0.1))\n A,B = TM.eval_A_and_b(theta)\n\n dA_1, dB_1 = TM.diff_A_and_b(A, B, theta, 0)\n dA_2, dB_2 = TM.diff_A_and_b(A, B, theta, 1)\n dA_3, dB_3 = TM.diff_A_and_b(A, B, theta, 2)\n dA_4, dB_4 = TM.diff_A_and_b(A, B, theta, 3)\n Z = numpy.zeros_like(dA_1.todense())\n z = numpy.zeros_like(dB_1)\n \n print \"dA/dtheta_1 check:\", numpy.allclose(dA_1.todense(), TM.A1.todense())\n print \"dA/dtheta_2 check:\", numpy.allclose(dA_2.todense(), TM.A2.todense())\n print \"dA/dtheta_3 check:\", numpy.allclose(dA_3.todense(), Z)\n print \"dA/dtheta_4 check:\", numpy.allclose(dA_4.todense(), Z)\n\n print \"db/dtheta_1 check:\", numpy.allclose(dB_1, z)\n print \"db/dtheta_2 check:\", numpy.allclose(dB_2, z)\n print \"db/dtheta_3 check:\", numpy.allclose(dB_3, TM.B1)\n print \"db/dtheta_4 check:\", numpy.allclose(dB_4, TM.B2)\n\n\n #\n # Test the 
lcs model class\n #\n\n gLCS = LCSModel()\n gLCS.eval_A_and_b = TM.eval_A_and_b\n gLCS.diff_A_and_b = TM.diff_A_and_b\n \n gLCS.quiet=True\n\n x = gLCS.eval(theta)\n #print x.shape\n\n for k in range(p):\n print \"Primal solution for x_{}, matches spsolve calculation: {}\".\\\n format(k, numpy.allclose(x[:,k], spla.spsolve(A,B[:,k])))\n\n\n D = gLCS.jacobian(theta)\n\n # -- If theta[1]=0, and theta[2:3] are fixed, then there is an analytical\n # calculation for x(theta[0]), and in this case we can check the first\n # column of D.\n\n theta = numpy.array((5.1, 0, 1.2, 2.1))\n A, B = TM.eval_A_and_b(theta)\n D = gLCS.jacobian(theta)\n\n for k in range(p):\n D_col_1 = -(1./theta[0]**2) * B[:,k]\n print \"First column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,0], D_col_1))\n\n\n # -- We'll use a numerical approximation to check the second column of D\n\n h = 0.000001\n theta = numpy.array((5.1, 1.1, 1.2, 2.1))\n dtheta = numpy.array((0., h, 0., 0.))\n A,B = TM.eval_A_and_b(theta)\n x = gLCS.eval(theta)\n D = gLCS.jacobian(theta)\n\n A_dt, B_dt = TM.eval_A_and_b(theta + dtheta)\n\n for k in range(p):\n x_dt = spla.spsolve(A_dt, B_dt[:,k])\n D_col_2_num_approx = (x_dt - x[:,k])/h\n max_abs_err = numpy.max(numpy.abs(D[k,:,1] - D_col_2_num_approx))\n\n print \"Second column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,1], D_col_2_num_approx))\n \n print \"Max abs error in second column of D_{}: {}\".\\\n format(k, max_abs_err)\n \n\n # -- If theta[0] and theta[1] are fixed, A(theta) is determined, and A^{-1}\n # is fixed. With a little math you can analytically calculate the third\n # and fourth columns of D. In fact x(theta) is linear in theta[2] and\n # theta[3], but not in theta[0] and theta[1].\n\n theta = numpy.array((1., 0.1, 0.2, 0.1))\n A,_ = TM.eval_A_and_b(theta)\n D = gLCS.jacobian(theta);\n\n for k in range(p):\n D_col_3 = spla.spsolve(A, TM.B1[:,k])\n\n print \"Third column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,2], D_col_3))\n\n\n for k in range(p):\n D_col_4 = spla.spsolve(A, TM.B2[:,k])\n \n print \"Fourth column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,3], D_col_4))", "def experiment_linear_conv_ls(_):\n # Min dft1-norm solution found (norm=1.9895)\n adv_norm_type = 'dftinf'\n dual_norm_type = 'dft1'\n attack_step_dir = 'dftinf_sd' # 'dftinf'\n\n module_name = 'train'\n # log_dir = 'runs_linear_conv_ls_%s' % adv_norm_type\n log_dir = 'runs_linear_conv_ls_normfix_%s' % adv_norm_type\n exclude = '*'\n\n d_over_n = [1, 2, 4, 8, 16, 32] # separable >= 1\n dim = 100\n num_train = [int(dim / p) for p in d_over_n]\n\n # Config params\n shared_params = []\n shared_params += [\n ('config', './config.py'),\n ('seed', list(range(3))),\n ]\n\n # Data hyper-parameters\n shared_params += [\n ('temperature', 0.0001),\n ('num_test', 1), # 500\n ('dim', dim),\n ('num_train', num_train),\n ]\n\n # Adversarial configuration: test\n shared_params += nameit('adv', [\n ('norm_type', adv_norm_type),\n # ('lr', 0.1),\n ('niters', 1), # 10\n # ('eps_iter', attack_eps), # Overwritten by cvxpy\n # ('eps_tot', attack_eps), # Overwritten by cvxpy\n ('pre_normalize', True), # multi attacks\n ('post_normalize', True),\n ('eps_from_cvxpy', True),\n ('step_dir', attack_step_dir),\n ])\n\n # Logging to standard output\n shared_params += [\n ('log_interval', 10000), # 1000),\n ('log_keys', '\\'(\"%s\")\\'' % ('\",\"'.join([\n 'risk/train/zero_one',\n 'risk/train/adv/%s' % adv_norm_type,\n 'weight/linear/norm/%s' % dual_norm_type,\n 'margin/%s' 
% dual_norm_type,\n ]))),\n # Compare with cvxpy\n ('enable_cvxpy', True),\n ]\n\n # Model hyper-parameters\n conv_linear_params = nameit('model', [\n ('arch', 'conv_linear'),\n ('nlayers', 2),\n ('regularizer', 'none'),\n ])\n\n params = []\n\n # GD line search implicit bias\n gd_ls = nameit('optim', [\n ('name', 'gd_ls'),\n ('niters', 100000),\n ('bound_step', True),\n ])\n params += [OrderedDict(shared_params+conv_linear_params+gd_ls)]\n\n return params, log_dir, module_name, exclude", "def analytCylDifn(R, T):\n n = 30\n lmbdavec = spcl.jn_zeros(0, n)\n theta = 0*R\n for i, lmbda in enumerate(lmbdavec):\n theta += ((2./lmbda) * spcl.j0(lmbda*R)/spcl.j1(lmbda)\n * np.exp(-lmbda**2*T))\n return theta", "def l2_reg_cost(cost, lambtha, weights, L, m):\n enorm = 0\n for i in range(1, L + 1):\n layer = 'W{}'.format(i)\n enorm += np.linalg.norm(weights[layer])\n return cost + (lambtha / (2 * m)) * enorm" ]
[ "0.661372", "0.61719704", "0.60370505", "0.5863777", "0.569097", "0.5663334", "0.56262916", "0.5592393", "0.54651314", "0.5444343", "0.53959835", "0.5394466", "0.53734285", "0.53725505", "0.5371813", "0.5362057", "0.5342894", "0.53264534", "0.5277725", "0.5275031", "0.5255691", "0.5249246", "0.5233609", "0.5224999", "0.5223098", "0.52079904", "0.51894665", "0.5184089", "0.51771814", "0.51650673" ]
0.6959504
0
Compute the identity loss. L_idt = lambda_idt * (lamA * [Expectation of L1_norm(F(A) - A)] + lamB * [Expectation of L1_norm(G(B) - B)])
def __identity_loss(self, identA, identB): loss = self.opt.lambda_ident * (self.opt.lamA * tf.reduce_mean(tf.abs(identB - self.realA)) + \ self.opt.lamB * tf.reduce_mean(tf.abs(identA - self.realB))) return loss
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def l1_loss(inputs, reduction='mean', **kwargs):\n args = ArgHelper.parse(locals())\n args['reduction'] = reduction.upper()\n op_lib = loss_ops_lib.L1Loss\n if context.executing_eagerly():\n return op_lib \\\n .instantiate(reduction=args['reduction']) \\\n .apply(inputs)\n else:\n return op_lib.blend(**args)", "def L_pseudo_inverse_tf(self) -> tf.Tensor:\n return tf.py_func(np.linalg.pinv, [self.L_tf], tf.float32)", "def l1_loss(D, G, real_data, generated_data, losses, options):\n return torch.nn.L1Loss()(generated_data, real_data)", "def identity_block(input_tensor, units):\n x = layers.Dense(units, kernel_regularizer=reg)(input_tensor)\n x = l()(x)\n x = layers.Activation('relu')(x)\n\n x = layers.Dense(units, kernel_regularizer=reg)(x)\n x = l()(x)\n x = layers.Activation('relu')(x)\n\n x = layers.Dense(units, kernel_regularizer=reg)(x)\n x = l()(x)\n x = layers.add([x, input_tensor])\n x = layers.Activation('relu')(x)\n\n return x", "def one_step(i_t, h_tm1):\n h_t = self.activation(T.dot(i_t, self.W) + T.dot(h_tm1, self.W_rec) + self.b)\n return h_t", "def forward(self, output, target):\n fake_A, fake_B, idt_A, idt_B = output\n #Generators are trained to trick the discriminators so the following should be ones\n self.adv_loss_A = -torch.mean(self.dualgan.D_A(fake_A)) \n self.adv_loss_B = -torch.mean(self.dualgan.D_B(fake_B))\n \n #Reconstruction loss\n self.rec_loss_A = F.l1_loss(self.dualgan.G_A(fake_B), self.real_A)\n self.rec_loss_B = F.l1_loss(self.dualgan.G_B(fake_A), self.real_B)\n \n #Identity loss\n self.id_loss_A = F.l1_loss(idt_A, self.real_A)\n self.id_loss_B = F.l1_loss(idt_B, self.real_B)\n \n return self.l_adv*(self.adv_loss_A+self.adv_loss_B)+self.l_rec*(self.rec_loss_A+self.rec_loss_B)+self.l_idt*(self.id_loss_A+self.id_loss_B)", "def _lambda(self, x, y, t, x_his, y_his, t_his):\n lam = self.mu + tf.reduce_sum(self._kernel(x - x_his, y - y_his, t - t_his), axis=0)\n return lam", "def ImpliesLTL(one: LTL, two: LTL) -> LTL:\n vars = one.variables\n vars += two.variables\n formula = \"(\" + one.formula + \") -> (\" + two.formula + \")\"\n return LTL(formula, vars)", "def logit_fn(args: StepFunctionArgs) -> SingleScorePerStepTensor:\n logits = args.attribution_model.output2logits(args.forward_output)\n target_ids = args.target_ids.reshape(logits.shape[0], 1)\n return logits.gather(-1, target_ids).squeeze(-1)", "def loss_fn(model):\n with flax.nn.stateful(state) as new_state:\n with flax.nn.stochastic(prng_key):\n logits = model(batch['image'])\n loss = cross_entropy_loss(logits, batch['label'])\n # TODO(britefury): check if applying L2 regularization to weights but\n # *not* biases improves results\n weight_penalty_params = jax.tree_leaves(model.params)\n weight_l2 = sum([jnp.sum(x ** 2)\n for x in weight_penalty_params\n if x.ndim > 1])\n weight_penalty = l2_reg * 0.5 * weight_l2\n loss = loss + weight_penalty\n return loss, (new_state, logits)", "def loss_function(self, x, x_hat_logit, mu, log_sigma):\n rec_loss = nn.functional.binary_cross_entropy_with_logits(x_hat_logit, x, size_average=False)\n kl_loss = -0.5 * torch.sum(1 + log_sigma - mu.pow(2) - log_sigma.exp())\n\n return rec_loss + (kl_loss), rec_loss, kl_loss", "def cross_entropy(m_true, alpha, alpha0, m_probs, lambd=1.0):\n\n loss = tf.reduce_sum(input_tensor=m_true * (tf.math.digamma(alpha0) - tf.math.digamma(alpha)), axis=1, keepdims=True)\n loss = tf.reduce_mean(input_tensor=loss)\n if lambd > 0:\n kl = kullback_leibler_dirichlet(m_true, alpha)\n loss = loss + lambd * kl\n return loss", "def 
kl_latent_space(network, *args):\n\n z, log_det_J = network(*args)\n loss = tf.reduce_mean(0.5 * tf.square(tf.norm(z, axis=-1)) - log_det_J)\n return loss", "def _l1m_objective(a,X,*args):\n \n return(np.sum(np.apply_along_axis(_euclidnorm,1,_diffmat_objective(a,X))))", "def _generator_loss(self, y_hat):\n\n l = -tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels = tf.zeros(tf.shape(y_hat)),logits = y_hat ))\n print('generatorloss shape',tf.shape(l))\n return l", "def ml_kl_loss(self, simulation, c1 = 1.0, ndims = 2, ehigh=1e5, emax = 1e10, turnover=200):\n loss = MLKL(c1, simulation, ndims, ehigh, emax, turnover)\n return loss.lossFunction", "def L_tf(self) -> tf.Tensor:\n return tf.diag(self.out_degrees_tf_vector) - self.A_tf", "def regularized_multinomial_likelihood(m_true, alpha, alpha0, m_probs, global_step, annealing_step=1000, max_lambda=1.0):\n\n ll = multinomial_likelihood(m_true, alpha, alpha0, m_probs)\n kl = kullback_leibler_dirichlet(m_true, alpha)\n lamb = tf.cast(tf.minimum(max_lambda, global_step / annealing_step), dtype=tf.float32)\n loss = ll + lamb * kl\n return loss", "def l1_loss(obs, actual):\n # (tf.Tensor, tf.Tensor, float) -> tf.Tensor\n return tf.reduce_sum(tf.abs(obs - actual) , 1)", "def L1(yhat, y):\n\n loss = np.sum(np.abs(y - yhat))\n \n return loss", "def idn(x):\n\n def grad(dy):\n return dy\n\n return tf.ones_like(x), grad", "def gl64(function,a,b):\r\n # Parameters\r\n a = torch.tensor(a)\r\n b = torch.tensor(b)\r\n k1 = (b-a)/2\r\n k2 = (b+a)/2\r\n gl64 = torch.tensor(0.)\r\n c = 0\r\n\r\n for i in range(64):\r\n w_k = w_i[c]\r\n x_k = k1*x_i[c]+k2\r\n gl64 = gl64 + w_k*function(x_k.unsqueeze(0))\r\n c += 1\r\n \r\n return gl64*k1", "def loss(self):\n return la.norm(self.resids) / self.normX", "def l1(y_true, y_pred):\n if K.ndim(y_true) == 4:\n return K.mean(K.abs(y_pred - y_true), axis=[1,2,3])\n elif K.ndim(y_true) == 3:\n return K.mean(K.abs(y_pred - y_true), axis=[1,2])\n else:\n raise NotImplementedError(\"Calculating L1 loss on 1D tensors? should not occur for this network\")", "def human_policy_kl_loss(student_logits, teacher_logits, action_type_kl_cost):\n # student_logits: list of ArgsActionLogits\n action_type_loss = kl(student_logits, teacher_logits, 1)\n kl_loss = action_type_kl_cost * torch.mean(action_type_loss)\n\n return kl_loss", "def loss(labels,q,M,a,b):\n x=-(labels*np.log(s.expit(z(q,M,a,b)))+(1-labels)*np.log(1-s.expit(z(q,M,a,b))))\n return np.sum(x)+l/2*(np.sum(M**2)+b**2)", "def __call__(self, x):\n h = F.relu(self.l0(x))\n h = F.relu(self.l1(h))\n return self.l2(h)", "def l2_loss(inputs, reduction='mean', **kwargs):\n args = ArgHelper.parse(locals())\n args['reduction'] = reduction.upper()\n op_lib = loss_ops_lib.L2Loss\n if context.executing_eagerly():\n return op_lib \\\n .instantiate(reduction=args['reduction']) \\\n .apply(inputs)\n else:\n return op_lib.blend(**args)", "def nll_loss(\n inputs,\n axis=1,\n ignore_index=None,\n reduction='valid',\n **kwargs\n):\n args = ArgHelper.parse(locals())\n args['reduction'] = reduction.upper()\n op_lib = loss_ops_lib.NLLLoss\n if context.executing_eagerly():\n return op_lib \\\n .instantiate(\n axis=axis,\n reduction=args['reduction'],\n ignore_index=ignore_index,\n ).apply(inputs)\n else:\n return op_lib.blend(**args)", "def generatorLoss(fakeOutput):\n return cross_entropy(tf.ones_like(fakeOutput), fakeOutput)" ]
[ "0.5652646", "0.5454709", "0.5431929", "0.5346413", "0.52700704", "0.5245193", "0.5241548", "0.5189512", "0.51814455", "0.51782507", "0.51747054", "0.5168681", "0.5149222", "0.51125103", "0.510106", "0.50966114", "0.5086483", "0.50756854", "0.50619715", "0.5041158", "0.5026359", "0.49958798", "0.49842122", "0.49822715", "0.4976281", "0.49656218", "0.49615568", "0.4958906", "0.49548876", "0.49539298" ]
0.69998527
0
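A minimal sketch of the identity loss from the row above, using NumPy in place of the TensorFlow graph ops; the weight values lambda_ident, lam_a and lam_b are illustrative assumptions, not values taken from the dataset.

import numpy as np

def identity_loss(ident_a, ident_b, real_a, real_b,
                  lambda_ident=0.5, lam_a=10.0, lam_b=10.0):
    # L_idt = lambda_ident * (lam_a * E[|identB - realA|] + lam_b * E[|identA - realB|]),
    # mirroring the reduce_mean/abs terms of the TensorFlow version.
    return lambda_ident * (lam_a * np.mean(np.abs(ident_b - real_a)) +
                           lam_b * np.mean(np.abs(ident_a - real_b)))

real_a = np.random.rand(2, 64, 64, 3)
real_b = np.random.rand(2, 64, 64, 3)
# Exactly 0.0 when the identity mappings reproduce their inputs.
print(identity_loss(ident_a=real_b, ident_b=real_a, real_a=real_a, real_b=real_b))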
Parses a tensorflow.SequenceExample into an image and caption.
def parse_sequence_example(serialized, image_id, image_feature, caption_feature): context, sequence = tf.parse_single_sequence_example( serialized, context_features={ image_id : tf.FixedLenFeature([], dtype=tf.int64), image_feature: tf.FixedLenFeature([], dtype=tf.string) }, sequence_features={ caption_feature: tf.FixedLenSequenceFeature([], dtype=tf.int64), }) encoded_image_id = context[image_id] encoded_image = context[image_feature] caption = sequence[caption_feature] return encoded_image_id, encoded_image, caption
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_sequence_example(serialized, image_feature, caption_feature):\n\tcontext, sequence = tf.parse_single_sequence_example(\n\t\t\tserialized,\n\t\t\tcontext_features={\n\t\t\t\t\timage_feature: tf.FixedLenFeature([], dtype=tf.string)\n\t\t\t},\n\t\t\tsequence_features={\n\t\t\t\t\tcaption_feature: tf.FixedLenSequenceFeature([], dtype=tf.int64),\n\t\t\t})\n\n\tencoded_image = context[image_feature]\n\tcaption = sequence[caption_feature]\n\treturn encoded_image, caption", "def _parse_tfexample(example):\n\n ## parse\n features = tf.parse_single_example(example, KEYS2FEATURES)\n\n image = tf.image.decode_png(features['image/encoded'])\n label = tf.image.decode_png(features['label/encoded'])\n # label is decoded as a 3-D png image\n label = label[..., 0]\n im_path = features['image/path']\n la_path = features['label/path']\n\n return image, label, im_path, la_path", "def parser(self, serialized_example):\n features = {\n 'image/height': tf.FixedLenFeature([], tf.int64),\n 'image/width': tf.FixedLenFeature([], tf.int64),\n 'image/colorspace': tf.FixedLenFeature([], tf.string),\n 'image/channels': tf.FixedLenFeature([], tf.int64),\n 'image/class/label': tf.FixedLenFeature([], tf.int64),\n 'image/format': tf.FixedLenFeature([], tf.string),\n 'image/encoded': tf.FixedLenFeature([], tf.string),\n 'image/fixation_pt': tf.FixedLenFeature([2], tf.float32)}\n parsed_features = tf.parse_single_example(serialized_example, features)\n\n # Get label as a Tensor.\n label = parsed_features['image/class/label']\n\n # Decode the image JPEG string into a Tensor.\n image = tf.image.decode_jpeg(parsed_features['image/encoded'],\n channels=self.DEPTH)\n\n # Convert from uint8 -> float32 and map onto range [0, 1].\n image = tf.cast(image, tf.float32) * (1. / 255)\n\n # Standardize image.\n image = tf.image.per_image_standardization(image)\n\n # Apply data augmentation.\n if (self.mode == tf.estimator.ModeKeys.TRAIN\n and self.params['train_with_distortion']):\n # Randomly flip the image, zero-pad with four pixels along\n # each edge, and take a random 32 x 32 crop.\n image = tf.image.random_flip_left_right(image)\n image = tf.image.resize_image_with_crop_or_pad(image, 40, 40)\n image = tf.image.crop_to_bounding_box(image,\n tf.random_uniform([], minval=0, maxval=8, dtype=tf.int32),\n tf.random_uniform([], minval=0, maxval=8, dtype=tf.int32),\n 32, 32)\n\n return image, label", "def parser(self, serialized_example):\n features = {\n 'image/height': tf.FixedLenFeature([], tf.int64),\n 'image/width': tf.FixedLenFeature([], tf.int64),\n 'image/colorspace': tf.FixedLenFeature([], tf.string),\n 'image/channels': tf.FixedLenFeature([], tf.int64),\n 'image/class/label': tf.FixedLenFeature([], tf.int64),\n 'image/class/synset': tf.FixedLenFeature([], tf.string),\n 'image/class/text': tf.FixedLenFeature([], tf.string),\n 'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),\n 'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),\n 'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),\n 'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),\n 'image/object/bbox/label': tf.VarLenFeature(tf.int64),\n 'image/format': tf.FixedLenFeature([], tf.string),\n 'image/encoded': tf.FixedLenFeature([], tf.string)}\n parsed_features = tf.parse_single_example(serialized_example, features)\n\n # Get label as a Tensor.\n label = parsed_features['image/class/label']\n\n # Decode the image JPEG string into a Tensor.\n image = tf.image.decode_jpeg(parsed_features['image/encoded'],\n channels=self.DEPTH)\n\n # VGG preprocessing 
borrowed from slim; includes data augmentation so train_with_distortion should be set to True.\n if self.mode == tf.estimator.ModeKeys.TRAIN:\n assert self.params['train_with_distortion'] == True\n is_training = True\n else:\n is_training = False\n image = vgg_preprocess_image(image, 224, 224, is_training=is_training)\n\n return image, label", "def prepare_example(image_path, annotations, label_map_dict):\n print(\"encoding %s\" % image_path)\n with tf.gfile.GFile(image_path, 'rb') as fid:\n encoded_png = fid.read()\n encoded_png_io = io.BytesIO(encoded_png)\n image = pil.open(encoded_png_io)\n\n if image.format != 'PNG':\n raise ValueError('Image format error')\n\n key = hashlib.sha256(encoded_png).hexdigest()\n # obtain attributes\n width, height = image.size\n img_filename = image_path.split('/')[-1]\n\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n classes = []\n classes_text = []\n truncated = []\n occlud = []\n\n xmin.append(int(annotations[2]) / width)\n ymin.append(int(annotations[3]) / height)\n xmax.append(int(annotations[4]) / width)\n ymax.append(int(annotations[5]) / height)\n class_name = annotations[1]\n classes_text.append(class_name)\n classes.append(label_map_dict[class_name])\n classes_text = [class_text.encode('utf-8') for class_text in classes_text]\n trun, occ = annotations[6].split(',')\n truncated.append(int(trun))\n occlud.append(int(occ))\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(img_filename.encode('utf8')),\n 'image/source_id': dataset_util.bytes_feature(img_filename.encode('utf8')),\n 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n 'image/encoded': dataset_util.bytes_feature(encoded_png),\n 'image/format': dataset_util.bytes_feature('png'.encode('utf8')),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes),\n 'image/object/truncated': dataset_util.int64_list_feature(truncated),\n 'image/object/view': dataset_util.int64_list_feature(occlud),\n }))\n return example", "def single_example_parser(serialized_example):\n # Dimensions of the images in the CIFAR-10 dataset.\n # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the\n # input format.\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'image': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.int64),\n })\n image = tf.decode_raw(features['image'], tf.uint8)\n image.set_shape([DEPTH * HEIGHT * WIDTH])\n\n # Reshape from [depth * height * width] to [depth, height, width].\n image = tf.cast(\n tf.transpose(tf.reshape(image, [DEPTH, HEIGHT, WIDTH]), [1, 2, 0]),\n tf.float32)\n label = tf.cast(features['label'], tf.int32)\n \n image = train_preprocess_fn(image)\n label = tf.one_hot(label, NUM_CLASSES)\n \n return image, label", "def parse_fn(self, example_serialized):\n feature_description = {\n 'image_raw': tf.io.FixedLenFeature([], tf.string),\n 'label': tf.io.FixedLenFeature([], tf.int64)\n }\n features = tf.io.parse_single_example(example_serialized, feature_description)\n image = 
tf.io.decode_raw(features['image_raw'], tf.uint8)\n image = tf.cast(image, dtype='float32') / 255.0\n label = tf.cast(features['label'], dtype=tf.int32)\n image = tf.reshape(image, [32, 32, 3])\n if self.is_training:\n image = tf.image.resize_with_crop_or_pad(image, 32 + 8, 32 + 8)\n image = tf.image.random_crop(image, [32, 32, 3])\n image = tf.image.random_flip_left_right(image)\n return image, label", "def dataset_parser(self, value):\n keys_to_features = {\n 'image/encoded':\n tf.io.FixedLenFeature((), tf.string, ''),\n 'image/format':\n tf.io.FixedLenFeature((), tf.string, 'jpeg'),\n 'image/class/label':\n tf.io.FixedLenFeature([], tf.int64, -1),\n 'image/class/text':\n tf.io.FixedLenFeature([], tf.string, ''),\n 'image/object/bbox/xmin':\n tf.io.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymin':\n tf.io.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/xmax':\n tf.io.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymax':\n tf.io.VarLenFeature(dtype=tf.float32),\n 'image/object/class/label':\n tf.io.VarLenFeature(dtype=tf.int64),\n }\n\n parsed = tf.io.parse_single_example(value, keys_to_features)\n image_bytes = tf.reshape(parsed['image/encoded'], shape=[])\n\n tensors_dict = preprocess_image(\n image_bytes=image_bytes,\n is_training=self.is_training,\n augmentation=self.augmentation,\n use_bfloat16=self.use_bfloat16,\n saturate_uint8=self.saturate_uint8,\n scale_and_center=self.scale_and_center,\n use_default_augment=self.use_default_augment)\n\n # Subtract one so that labels are in [0, 1000).\n label = tf.cast(tf.reshape(parsed['image/class/label'], shape=()) - 1,\n dtype=tf.int32)\n tensors_dict['label'] = label\n\n return tensors_dict", "def parser(serialized_example):\n features = tf.parse_single_example(\n serialized_example,\n features={\n \"image\": tf.FixedLenFeature([], tf.string),\n \"label\": tf.FixedLenFeature([], tf.int64),\n })\n image = tf.decode_raw(features[\"image\"], tf.uint8)\n image.set_shape([CHANNELS * HEIGHT * WIDTH])\n # Reshape from [depth * height * width] to [depth, height, width].\n image = tf.cast(\n tf.transpose(tf.reshape(image, [CHANNELS, HEIGHT, WIDTH]), [1, 2, 0]),\n tf.float32) * (2. 
/ 255) - 1\n\n label = tf.cast(features['label'], tf.int32)\n\n random_noise = tf.random_normal([noise_dim])\n features = {\n 'real_images': image,\n 'random_noise': random_noise}\n\n return features, label", "def parse_train(self, proto, height, width):\n _, sequence_parsed = tf.io.parse_single_sequence_example(\n proto,\n context_features=self._context_features,\n sequence_features=self._sequence_features)\n\n # Deserialize images to float32 tensors.\n images = tf.map_fn(\n _deserialize_png, sequence_parsed['images'], dtype=tf.float32)\n\n # Resize images.\n if height is not None and width is not None:\n images = smurf_utils.resize(images, height, width, is_flow=False)\n\n return {'images': images}", "def _parse_example(self, example, scale_to_0_1: bool = False):\n\n features = {\n 'image': tf.FixedLenFeature([], tf.string),\n 'mask': tf.FixedLenFeature([], tf.string),\n }\n parsed_example = tf.parse_single_example(example, features)\n\n image = tf.decode_raw(parsed_example['image'], self.serialized_image_raw_dtype)\n image = tf.reshape(image, (self.image_width, self.image_width, self.image_channels))\n image = tf.cast(image, tf.float32)\n if scale_to_0_1:\n image /= 255.\n\n mask = tf.decode_raw(parsed_example['mask'], self.serialized_mask_raw_dtype)\n mask = tf.reshape(mask, (self.image_width, self.image_width, self.mask_channels))\n mask = tf.cast(mask, tf.float32) / 255.\n return image, mask", "def create_sequence_example(inner_image_path,\n inner_sample):\n\n # serialize a pointer to the disk location of the image features\n # copying data for every training example would consume too much storage\n image_path_feature = tf.train.Feature(\n bytes_list=tf.train.BytesList(\n value=[bytes(inner_image_path, \"utf-8\")]))\n\n # add all other tokens to the tf record\n words_feature = tf.train.FeatureList(\n feature=[tf.train.Feature(\n int64_list=tf.train.Int64List(value=[t])) for t in inner_sample.words])\n tags_feature = tf.train.FeatureList(\n feature=[tf.train.Feature(\n int64_list=tf.train.Int64List(value=[t])) for t in inner_sample.tags])\n\n # create the dictionary of features to save\n context_dict = dict(image_path=image_path_feature)\n sequence_dict = dict(words=words_feature, tags=tags_feature)\n\n # create a sequence example\n return tf.train.SequenceExample(\n context=tf.train.Features(feature=context_dict),\n feature_lists=tf.train.FeatureLists(\n feature_list=sequence_dict))", "def _convert_raw_example(\n self,\n mode_dict: MutableMapping[str, Any],\n example: Mapping[str, Any]) -> ProcessedExample:\n img_path = example['image_path_or_name']\n base_name = os.path.basename(img_path)\n img_fobj = example.get('image_fobj', tf.io.gfile.GFile(img_path, 'rb'))\n img_bytes, img_shape = image_utils.image_to_jpeg(fobj=img_fobj,\n filename=base_name)\n\n img_format = 'JPEG'\n key = hashlib.sha256(img_bytes.read()).hexdigest()\n img_bytes.seek(0)\n\n bboxes = example['bbox_info']\n processed_bboxes = []\n\n img_height = img_shape[0]\n img_width = img_shape[1]\n\n img_id = example.get('image_id', self._get_id('image'))\n mode_dict['images'].append({\n 'id': img_id,\n 'width': img_width,\n 'height': img_height,\n })\n\n for bbox_info in bboxes:\n annotations_bbox = bbox_info['bbox']\n bbox = bbox_utils.BBox(bbox=annotations_bbox,\n fmt=self.builder_config.bbox_format,\n img_width=img_width,\n img_height=img_height)\n label = bbox_info['label']\n if isinstance(label, int):\n text = str(label)\n elif isinstance(label, six.string_types):\n text = label\n label = bbox_info.get('label_id', 
self._get_label_id(text))\n else:\n raise TypeError(\n 'The provided label was not a string or int. Got: {}'.format(\n type(label)))\n\n if label >= self.builder_config.num_labels:\n raise ValueError('Provided label {} for {} is greater than '\n 'the number of classes specified. num_classes: '\n '{}'.format(label,\n base_name,\n self.builder_config.num_labels))\n\n annotation_id = example.get('annotation_id', self._get_id('annotation'))\n bbox.convert(bbox_utils.BBoxFormat.NORMALIZED_MIN_MAX)\n xmin, xmax, ymin, ymax = bbox.as_tuple()\n bbox = bbox.convert(bbox_utils.BBoxFormat.WIDTH_HEIGHT)\n mode_dict['annotations'].append({\n 'id': annotation_id,\n 'image_id': img_id,\n 'category_id': label,\n 'bbox': annotations_bbox,\n })\n\n processed_bboxes.append({\n 'bbox': tfds.features.BBox(ymin=ymin,\n xmin=xmin,\n ymax=ymax,\n xmax=xmax),\n 'class': {\n 'text': text,\n 'label': label,\n }\n })\n\n return img_id, {\n 'image': {\n 'height': img_width,\n 'width': img_shape[1],\n 'filename': img_path,\n 'source_id': img_id,\n 'encoded': img_bytes,\n 'format': img_format,\n 'key': {\n 'sha256': key,\n },\n 'object': processed_bboxes,\n }\n }", "def parse_record(raw_record):\n keys_to_features = {\n 'image/height':\n tf.FixedLenFeature((), tf.int64),\n 'image/width':\n tf.FixedLenFeature((), tf.int64),\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format':\n tf.FixedLenFeature((), tf.string, default_value='jpeg'),\n 'label/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'label/format':\n tf.FixedLenFeature((), tf.string, default_value='png'),\n }\n\n parsed = tf.parse_single_example(raw_record, keys_to_features)\n\n # height = tf.cast(parsed['image/height'], tf.int32)\n # width = tf.cast(parsed['image/width'], tf.int32)\n\n image = tf.image.decode_image(\n tf.reshape(parsed['image/encoded'], shape=[]), _DEPTH)\n image = tf.to_float(tf.image.convert_image_dtype(image, dtype=tf.uint8))\n image.set_shape([None, None, 3])\n\n label = tf.image.decode_image(\n tf.reshape(parsed['label/encoded'], shape=[]), 1)\n label = tf.to_int32(tf.image.convert_image_dtype(label, dtype=tf.uint8))\n label.set_shape([None, None, 1])\n\n\n return image, label", "def parser(_, serialized_example):\n features = {}\n\n for i in frame_nums:\n image_name = 'image_' + str(i)\n if flags.dataset_type == 'robot':\n pose_name = 'state_' + str(i)\n action_name = 'action_' + str(i)\n joint_pos_name = 'joint_positions_' + str(i)\n\n features[pose_name] = tf.FixedLenFeature([flags.pose_dim], tf.float32)\n features[image_name] = tf.FixedLenFeature([1], tf.string)\n features[action_name] = tf.FixedLenFeature([flags.pose_dim],\n tf.float32)\n features[joint_pos_name] = tf.FixedLenFeature([flags.joint_pos_dim],\n tf.float32)\n else:\n features[image_name] = tf.FixedLenFeature([1], tf.string)\n\n parsed_input = tf.parse_single_example(serialized_example, features)\n\n for i in frame_nums:\n image_name = 'image_' + str(i)\n pose_name = 'state_' + str(i)\n action_name = 'action_' + str(i)\n joint_pos_name = 'joint_positions_' + str(i)\n\n # Process image\n image_buffer = tf.reshape(parsed_input[image_name], shape=[])\n image = tf.image.decode_jpeg(image_buffer, channels=COLOR_CHAN)\n image = tf.image.resize_images(\n image, (IMG_HEIGHT, IMG_WIDTH),\n method=tf.image.ResizeMethod.BICUBIC)\n image = tf.cast(tf.expand_dims(image, 0), tf.float32) / 255.0\n\n if flags.dataset_type == 'robot':\n pose = tf.reshape(parsed_input[pose_name], shape=[flags.pose_dim])\n pose = tf.expand_dims(pose, 
0)\n action = tf.reshape(parsed_input[action_name], shape=[flags.pose_dim])\n action = tf.expand_dims(action, 0)\n joint_pos = tf.reshape(\n parsed_input[joint_pos_name], shape=[flags.joint_pos_dim])\n joint_pos = tf.expand_dims(joint_pos, 0)\n else:\n pose = tf.zeros([1, flags.pose_dim])\n action = tf.zeros([1, flags.pose_dim])\n joint_pos = tf.zeros([1, flags.joint_pos_dim])\n\n if i == 0:\n image_seq = image\n action_seq, pose_seq, joint_pos_seq = action, pose, joint_pos\n else:\n image_seq = tf.concat([image_seq, image], 0)\n action_seq = tf.concat([action_seq, action], 0)\n pose_seq = tf.concat([pose_seq, pose], 0)\n joint_pos_seq = tf.concat([joint_pos_seq, joint_pos], 0)\n\n return image_seq, action_seq, action_seq, joint_pos_seq", "def create_cat_tf_example(label, label_text, img_path, img_name):\n\t\n\twith tf.gfile.FastGFile(img_path + img_name, 'rb') as fid:\n\t encoded_image = fid.read() \n\n\tencoded_image_data = sess.run(resize_image, {encoded_jpg_ph: encoded_image}) # I think this may not be the right way of doing this\n\tb_filename = str.encode(img_name)\n\n\timage_format = b'jpg'\n\txmins = [10.0 / width]\n\txmaxs = [(width - 10) / width]\n\tymins = [10.0 / height]\n\tymaxs = [(height - 10.0) / height]\n\t# classes_text = [str.encode(label_text)]\n\tclasses_text = []\n\tif label_text:\n\t\tclasses_text.append(label_text.encode('utf8'))\n\tclasses = []\n\t# if label == 1:\n\tclasses.append(int(label))\n\t# print(classes_text, classes, b_filename)\n\ttf_example = tf.train.Example(features=tf.train.Features(feature={\n\t\t'image/height': dataset_util.int64_feature(height),\n\t\t'image/width': dataset_util.int64_feature(width),\n\t\t'image/filename': dataset_util.bytes_feature(b_filename),\n\t\t'image/source_id': dataset_util.bytes_feature(b_filename),\n\t\t'image/encoded': dataset_util.bytes_feature(encoded_image_data),\n\t\t# 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n\t\t'image/format': dataset_util.bytes_feature(image_format),\n\t\t'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),\n\t\t'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),\n\t\t'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),\n\t\t'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),\n\t\t'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n\t\t'image/object/class/label': dataset_util.int64_list_feature(classes),\n\t}))\n\treturn tf_example", "def _parser(serialized_example):\n\n features = tf.compat.v1.parse_single_example(\n serialized_example,\n features={\n 'img_raw': tf.compat.v1.FixedLenFeature([], tf.string),\n 'label': tf.compat.v1.FixedLenFeature([], tf.int64),\n 'category': tf.compat.v1.FixedLenFeature([], tf.int64),\n 'elevation': tf.compat.v1.FixedLenFeature([], tf.int64),\n 'azimuth': tf.compat.v1.FixedLenFeature([], tf.int64),\n 'lighting': tf.compat.v1.FixedLenFeature([], tf.int64),\n })\n\n img = tf.compat.v1.decode_raw(features['img_raw'], tf.float64)\n img = tf.reshape(img, [96, 96, 1])\n img = tf.cast(img, tf.float32) # * (1. 
/ 255) # left unnormalized\n\n lab = tf.cast(features['label'], tf.int32)\n cat = tf.cast(features['category'], tf.int32)\n elv = tf.cast(features['elevation'], tf.int32)\n azi = tf.cast(features['azimuth'], tf.int32)\n lit = tf.cast(features['lighting'], tf.int32)\n\n return img, lab, cat, elv, azi, lit", "def convert_to_example(past_traj, future_traj):\n # img = cv2.imread('temp.png')\n # cv2.imshow('img', img)\n # cv2.waitKey(1)\n\n img_bytes = open('temp.png', 'rb').read()\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/encoded': _bytes_feature(img_bytes),\n 'past_traj': _bytes_feature(past_traj.tostring()),\n 'future_traj': _bytes_feature(future_traj.tostring())\n }))\n\n return example", "def make_video_test_example(image_shape: Sequence[int] = (263, 320, 3),\n audio_shape: Sequence[int] = (10, 256),\n label: int = 42):\n raw_image_bytes = make_image_bytes(shape=image_shape)\n random_audio = np.random.normal(size=audio_shape).tolist()\n\n seq_example = tf.train.SequenceExample()\n put_int64_to_context(seq_example, label=label, key=LABEL_KEY)\n put_bytes_list_to_feature(\n seq_example, raw_image_bytes, key=IMAGE_KEY, repeat_num=4)\n\n put_float_list_to_feature(seq_example, value=random_audio, key=AUDIO_KEY)\n return seq_example", "def _parse_function(self, example_proto):\n\n # Currently only supports jpeg and png.\n # Need to use this logic because the shape is not known for\n # tf.image.decode_image and we rely on this info to\n # extend label if necessary.\n def _decode_image(content, channels):\n return tf.cond(\n tf.image.is_jpeg(content),\n lambda: tf.image.decode_jpeg(content, channels),\n lambda: tf.image.decode_png(content, channels))\n\n features = {\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/filename':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format':\n tf.FixedLenFeature((), tf.string, default_value='jpeg'),\n 'image/height':\n tf.FixedLenFeature((), tf.int64, default_value=0),\n 'image/width':\n tf.FixedLenFeature((), tf.int64, default_value=0),\n 'image/segmentation/class/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/segmentation/class/format':\n tf.FixedLenFeature((), tf.string, default_value='png'),\n }\n\n parsed_features = tf.parse_single_example(example_proto, features)\n\n image = _decode_image(parsed_features['image/encoded'], channels=3)\n\n label = None\n if self.split_name != common.TEST_SET:\n label = _decode_image(\n parsed_features['image/segmentation/class/encoded'], channels=1)\n\n image_name = parsed_features['image/filename']\n if image_name is None:\n image_name = tf.constant('')\n\n sample = {\n common.IMAGE: image,\n common.IMAGE_NAME: image_name,\n common.HEIGHT: parsed_features['image/height'],\n common.WIDTH: parsed_features['image/width'],\n }\n\n if label is not None:\n if label.get_shape().ndims == 2:\n label = tf.expand_dims(label, 2)\n elif label.get_shape().ndims == 3 and label.shape.dims[2] == 1:\n pass\n else:\n raise ValueError('Input label shape must be [height, width], or '\n '[height, width, 1].')\n\n label.set_shape([None, None, 1])\n\n sample[common.LABELS_CLASS] = label\n\n return sample", "def _format_example(self, image_path=None):\r\n image = tf.io.read_file(image_path)\r\n image = tf.io.decode_jpeg(image)\r\n image = tf.cast(image, tf.float32)\r\n image = tf.image.per_image_standardization(image)\r\n image = tf.reshape(image, (self.img_size, self.img_size, 3))\r\n return image", "def format_example(image, 
label):\n image = tf.cast(image, tf.float32)\n image = (image/127.5) - 1\n image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))\n return image, label", "def parser(record):\n # keys_to_features = {\n # \"image_data\": tf.FixedLenFeature((), tf.string, default_value=\"\"),\n # \"date_time\": tf.FixedLenFeature((), tf.int64, default_value=\"\"),\n # \"label\": tf.FixedLenFeature((), tf.int64,\n # default_value=tf.zeros([], dtype=tf.int64)),\n # }\n\n keys_to_features = {\n \"image_data\": tf.FixedLenFeature((), tf.float, default_value=\"\"),\n \"label\": tf.FixedLenFeature((), tf.int32,\n default_value=tf.zeros([], dtype=tf.int64)),\n }\n parsed = tf.parse_single_example(record, keys_to_features)\n\n # Perform additional preprocessing on the parsed data.\n image = tf.image.decode_jpeg(parsed[\"image_data\"])\n image = tf.reshape(image, [299, 299, 1])\n label = tf.cast(parsed[\"label\"], tf.int32)\n\n return {\"image_data\": image, \"date_time\": parsed[\"date_time\"]}, label", "def parser(record):\n record_spec = {\n \"input\": tf.FixedLenFeature([seq_len], tf.int64),\n \"labels\": tf.FixedLenFeature([tgt_len], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_len],tf.float32),\n \"target_mask\": tf.FixedLenFeature([tgt_len],tf.float32)\n }\n\n # retrieve serialized example\n example = tf.parse_single_example(\n serialized=record,\n features=record_spec)\n\n _convert_example(example, use_bfloat16)\n\n for k, v in example.items():\n tf.logging.info(\"%s: %s\", k, v)\n\n return example", "def create_tf_example(example, path, class_mapping):\n path = (path + os.sep).encode('ascii')\n filename = example['filename'].encode('ascii')\n image_format = b'jpg'\n \n image = plt.imread(path +filename, \"jpg\") \n height, width = image.shape[:2]\n \n # Encode the jpg to byte form\n with tf.gfile.GFile(path+filename, 'rb') as fid:\n encoded_jpg = bytes(fid.read())\n\n # normalize the box coordinates\n xmins = [box[0]/width for box in example['box_coords']] \n ymins = [box[1]/height for box in example['box_coords']] \n xmaxs = [box[2]/width for box in example['box_coords']]\n ymaxs = [box[3]/height for box in example['box_coords']]\n\n classes_text = [cls.encode('ascii') for cls in example[\"class\"]]\n classes = [class_mapping[cls] for cls in example[\"class\"]]\n\n # create the example\n tf_example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height' : dataset_util.int64_feature(height),\n 'image/width' : dataset_util.int64_feature(width),\n 'image/filename' : dataset_util.bytes_feature(filename),\n 'image/source_id' : dataset_util.bytes_feature(filename),\n 'image/encoded' : dataset_util.bytes_feature(encoded_jpg),\n 'image/format' : dataset_util.bytes_feature(image_format),\n 'image/object/bbox/xmin' : dataset_util.float_list_feature(xmins),\n 'image/object/bbox/xmax' : dataset_util.float_list_feature(xmaxs),\n 'image/object/bbox/ymin' : dataset_util.float_list_feature(ymins),\n 'image/object/bbox/ymax' : dataset_util.float_list_feature(ymaxs),\n 'image/object/class/text' : dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label' : dataset_util.int64_list_feature(classes),\n }))\n return tf_example", "def parse_attention_example(tf_example):\n\n # specify features in attention example \n features_map = {\n 'sequence_raw': tf.FixedLenFeature([], tf.string),\n 'label_raw': tf.FixedLenFeature([], tf.string),\n 'annotation_raw': tf.FixedLenFeature([], tf.string)}\n\n # parse tf example for internal tensors\n parsed_example = tf.parse_single_example(tf_example, 
features_map)\n\n # decode examples\n sequence_raw = tf.decode_raw(parsed_example['sequence_raw'], tf.uint8)\n label_raw = tf.decode_raw(parsed_example['label_raw'], tf.uint8)\n annotation_raw = tf.decode_raw(parsed_example['annotation_raw'], tf.float32)\n\n # parsed tensors are flat so reshape if needed\n # cast to floats for attention task\n sequence = tf.cast(tf.reshape(sequence_raw, SEQUENCE_SHAPE), dtype=tf.float32)\n label = tf.cast(label_raw, dtype=tf.float32)\n annotation = tf.reshape(annotation_raw, ANNOTATION_SHAPE)\n\n return {'sequence': sequence, 'label': label, 'annotation': annotation}", "def preprocess(example, num_classes=10, is_training=True):\n features = {'scores': tf.VarLenFeature(tf.float32),\n 'image': tf.FixedLenFeature((), tf.string)}\n parsed = tf.parse_single_example(example, features)\n image = tf.image.decode_jpeg(parsed['image'], channels=3)\n image = nima.preprocess_image(image, is_training=is_training)\n scores = parsed['scores']\n scores = tf.sparse_tensor_to_dense(scores)\n scores = tf.reshape(scores, [num_classes])\n scores = scores / tf.reduce_sum(scores, axis=-1, keepdims=True)\n return image, scores", "def dict_to_tf_example(label_map_dict):\n filename = label_map_dict[0]\n img_path = os.path.join(FLAGS.image_data_dir, filename)\n\n try:\n with tf.gfile.GFile(img_path, 'rb') as fid:\n encoded_jpg = fid.read()\n except:\n logging.warning('Image Not Found %s', img_path)\n return None\n\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = Image.open(encoded_jpg_io)\n (witdh, height) = image.size\n\n if image.format != 'JPEG':\n raise ValueError('Image format not JPEG')\n key = hashlib.sha256(encoded_jpg).hexdigest()\n\n sentence_txt = label_map_dict[1]\n\n\n sentences = []\n f = open('dictionary.json', 'r')\n dictionary = f.read()\n dictionary = json.loads(dictionary)\n for index, _ in enumerate(sentence_txt):\n sentence = []\n for sen in sentence_txt[index].split(' '):\n try:\n sentence.append(dictionary[sen])\n except KeyError:\n sentence.append(dictionary['UNK'])\n sentences.append(sentence)\n\n feature_dict = {\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(witdh),\n 'image/filename': dataset_util.bytes_feature(filename.encode('utf8')),\n 'image/score_0': dataset_util.int64_list_feature(sentences[0]),\n 'image/score_1': dataset_util.int64_list_feature(sentences[1]),\n 'image/score_2': dataset_util.int64_list_feature(sentences[2]),\n 'image/score_3': dataset_util.int64_list_feature(sentences[3]),\n 'image/score_4': dataset_util.int64_list_feature(sentences[4]),\n 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8'))\n }\n\n example = tf.train.Example(features=tf.train.Features(feature=feature_dict))\n return example", "def _generate_examples(self, images_path, annotations_path):\n caption_file = '/captions_val2014.json'\n with tf.io.gfile.GFile(annotations_path + caption_file) as f:\n data = json.load(f)\n path_head = images_path + '/COCO_val2014_'\n ann = data['annotations'] # Contains annotations\n \n img_names = [path_head + '%012d.jpg' % i['image_id'] for i in ann] \n captions = ['<start> ' + i['caption'] + ' <end>' for i in ann]\n ids = [i['id'] for i in ann]\n \n # The above lines create the captions (start and end tokens), the \n # image names (which consist of the path head and a 12 digit number,\n # right-aligned with the id), and the id to distinguish 
each unique image.\n\n for (i, name) in enumerate(img_names):\n yield ids[i], {\n 'image': name,\n 'caption': captions[i]\n }", "def parse_example(example, image_width:int = 224, image_channels: int = 3, mask_channels: int = 1000, scale_to_0_1: bool = False, serialized_mask_raw_dtype = tf.float64):\n\n features = {\n 'image': tf.FixedLenFeature([], tf.string),\n 'mask': tf.FixedLenFeature([], tf.string),\n }\n\n parsed_example = tf.parse_single_example(example, features)\n\n image = tf.decode_raw(parsed_example['image'], tf.uint8)\n image = tf.reshape(image, (image_width, image_width, image_channels))\n image = tf.cast(image, tf.float32)\n if scale_to_0_1:\n image /= 255.\n\n mask = tf.decode_raw(parsed_example['mask'], serialized_mask_raw_dtype) # tf.uint8)\n mask = tf.reshape(mask, (image_width, image_width, mask_channels))\n mask = tf.cast(mask, tf.float32) / 255.\n return image, mask" ]
[ "0.7773432", "0.70653176", "0.6706283", "0.66781205", "0.66210777", "0.6502138", "0.63294804", "0.63211375", "0.62616134", "0.62233996", "0.6201436", "0.6195922", "0.6186161", "0.6184599", "0.6164636", "0.6148699", "0.6147658", "0.614749", "0.6147216", "0.61331445", "0.6099785", "0.6097788", "0.6080328", "0.6024054", "0.5964376", "0.59438324", "0.5934399", "0.5927543", "0.5909124", "0.5894257" ]
0.7657589
1
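A short sketch of the producer side for the parse_sequence_example document above: it builds and serializes one tf.train.SequenceExample whose context holds an image id and encoded image bytes, and whose feature list holds the caption token ids. The feature key names used here are illustrative assumptions; the parser above receives its key names as arguments.

import tensorflow as tf

def make_sequence_example(image_id, encoded_image, caption_ids):
    # Context features: scalar image id and the encoded image bytes.
    context = tf.train.Features(feature={
        "image/id": tf.train.Feature(int64_list=tf.train.Int64List(value=[image_id])),
        "image/data": tf.train.Feature(bytes_list=tf.train.BytesList(value=[encoded_image])),
    })
    # Sequence feature: one int64 feature per caption token id.
    feature_lists = tf.train.FeatureLists(feature_list={
        "image/caption_ids": tf.train.FeatureList(feature=[
            tf.train.Feature(int64_list=tf.train.Int64List(value=[t]))
            for t in caption_ids]),
    })
    return tf.train.SequenceExample(context=context, feature_lists=feature_lists)

serialized = make_sequence_example(42, b"<encoded image bytes>", [1, 5, 9, 2]).SerializeToString()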
Run demo, testing whether input words are beer related.
def run_demo(): while True: embeddings = beer_emb.embed_doc(input("Test if words are beer-related: "), word_filter=False) for word_vec in embeddings: print(is_beer_related(word_vec))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_run():\r\n print(count_words(\"cat bat mat cat bat cat\", 3))\r\n print(count_words(\"betty bought a bit of butter but the butter was bitter\", 3))", "def test_run():\n print count_words(\"cat bat mat cat bat cat\", 3)\n print count_words(\"betty bought a bit of butter but the butter was bitter\", 3)", "def test_run():\n print count_words(\"cat bat mat cat bat cat\", 3)\n print count_words(\"betty bought a bit of butter but the butter was bitter\", 3)", "def test_Demo(self):\n self._run(self._example_scenarios, \"Demo\")", "def demo(cls):\n print(\"\\tDemo for class Preprocess\\n\"\n \"For each method, you can see its arguments and output. \"\n \"For more information use the help function.\\n\\n\"\n \"Arguments used for instanciating the class:\\n\"\n \"\\tcorpus - {}\".format(cls.DEMO[\"corpus\"]))\n pre = cls(**cls.DEMO)\n print(\"{:=^90}\".format(\"corpus_stats()\"))\n pre.corpus_stats()\n print(\"{:=^90}\".format(\"bigrams()\"))\n print(pre.bigrams())\n print(\"{:=^90}\".format(\"bigrams('domain1.txt')\"))\n print(pre.bigrams(\"domain1.txt\"))\n print(\"{:=^90}\".format(\"get_frequency\"\n \"([('computational', 'linguistics'), \"\n \"('not', 'present')])\"))\n print(pre.get_frequency([('computational', 'linguistics'),\n ('not', 'present')]))\n print(\"{:=^90}\".format(\"is_lexical('hello', 'world')\"))\n print(pre.is_lexical('hello', 'world'))\n print(\"{:=^90}\".format(\"is_lexical('hello', '?')\"))\n print(pre.is_lexical('hello', '?'))\n print(\"{:=^90}\".format(\"has_relevant_tag(('computational', \"\n \"'linguistics'), \"\n \"relevant={'NN', 'NNP', 'NNS'})\"))\n print(pre.has_relevant_tag(('computational', 'linguistics'),\n relevant={'NN', 'NNP', 'NNS'}))\n print(\"{:=^90}\".format(\"has_relevant_tag(('is', 'difficult'),\"\n \"relevant={'NN', 'NNP', 'NNS'})\"))\n print(pre.has_relevant_tag(('is', 'difficult'),\n relevant={'NN', 'NNP', 'NNS'}))\n print(\"{:=^90}\".format(\"candidates(min_count=1, \"\n \"stops=['is', 'the', 'for', 'of'], \"\n \"tags={'NN', 'NNP', 'NNS'})\"))\n print(pre.candidates(min_count=1,\n stops=['is', 'the', 'for', 'of'],\n tags={'NN', 'NNP', 'NNS'}))", "def test_win(self):\n self.choice.return_value = \"ant\"\n self.input.side_effect = list(\"ant\" \"n\")\n\n gallows.main()\n\n self.xprint.assert_any_call('Yes! The secret word is \"ant\"! '\n 'You have won!')", "def main(word_count=2, use_caps=False, use_leet=False, caps_percent=25, leet_percent=20):\n\n phrase = get_phrase(word_count)\n\n if use_caps:\n phrase = random_caps(phrase, caps_percent)\n\n if use_leet:\n phrase = random_characters(phrase, leet_percent)\n\n print(phrase)", "def main():\n\n args = get_args()\n words = args.phrase\n\n words = codify_phrase(words)\n display = ' '.join(words)\n\n print(display)", "def runTests():\n\n\tsentenceList = [\n\t\t\"Sore was I ere I saw Eros.\",\n\t\t\"This is not a Palindrome!\",\n\t\t\"A man, a plan, a canal -- Panama\",\n\t\t\"Never a foot too far, even.\",\n\t\t\"Euston saw I was not Sue.\",\n\t\t\"Live on evasions? No, I save no evil.\",\n\t\t\"Red Roses run no risk, sir, on nurses order.\",\n\t\t\"Salisbury moor, sir, is roomy. Rub Silas.\",\n\t\t'''Marge, let's \"went.\" I await news telegram.''',\n\t\t\"A new order began, a more Roman age bred Rowena.\",\n\t\t\"I, man, am regal; a German am I.\",\n\t\t\"Tracy, no panic in a pony-cart.\",\n\t\t\"Egad! Loretta has Adams as mad as a hatter. 
Old age!\",\n\t\t\"Eve, mad Adam, Eve!\",\n\t\t\"Resume so pacific a pose, muser.\",\n\t\t\"Marge let a moody baby doom a telegram.\",\n\t\t\"Tenet C is a basis, a basic tenet.\",\n\t\t'''Nella's simple hymn: \"I attain my help, Miss Allen.\"''',\n\t\t\"Straw? No, too stupid a fad. I put soot on warts.\",\n\t\t\"Sir, I demand, I am a maid named Iris.\",\n\t\t\"Lay a wallaby baby ball away, Al.\",\n\t\t\"Tessa's in Italy, Latin is asset.\",\n\t\t\"Noel sees Leon.\",\n\t\t]\n\n\tprint()\n\tprint(\"Start of Proposal by Conrad Storz...\")\n\tfor candidate in sentenceList:\n\t\tprint(\" \" + str(isPalindrome_Storz(candidate)) + \"[\" + candidate + \"] {is a palindrome} \")\n\n\tprint()\n\tprint(\"Start of Proposal by Jaysen...\")\n\tfor candidate in sentenceList:\n\t\tprint(\" \" + str(isPalindrome_Jaysen(candidate)) + \"[\" + candidate + \"] {is a palindrome} \")\n\n\tprint()\n\tprint(\"Start of Proposal by Phillip Adkins...\")\n\tfor candidate in sentenceList:\n\t\tprint(\" \" + str(isPalindrome_PhillipAdkins(candidate)) + \"[\" + candidate + \"] {is a palindrome} \")\n\n\tprint()\n\tprint(\"Start of Proposal by Dmitry Kreslavskiy...\")\n\tfor candidate in sentenceList:\n\t\tprint(\" \" + str(isPalindrome_Dmitry(candidate)) + \"[\" + candidate + \"] {is a palindrome} \")", "def test_example_runs(self):\n run_example(\n verbose=False,\n testapp=self.testapp,\n )", "def main():\n answers_style = drink_style_input()\n drink = drink_make(answers_style)\n print \"\"\n print \"Your drink includes:\"\n for ingredient in drink:\n print \"A {}\".format(ingredient)", "def main(words, s):\n if words:\n words = int(words)\n click.echo(lorem.words(words))\n\n # Returns a lorem ipsum sentence\n elif s:\n click.echo(lorem.sentence())\n\n # Returns a lorem ipsum paragraph by default\n else:\n click.echo(lorem.paragraph())", "def run_tests():\r\n source1 = TextModel('50 Shades of Gray')\r\n source1.add_file('50.txt')\r\n \r\n print()\r\n \r\n source2 = TextModel('King James Version of the Bible')\r\n source2.add_file('kjv.txt')\r\n\r\n print()\r\n\r\n new1 = TextModel('Shakespeare')\r\n new1.add_file('shake.txt')\r\n new1.classify(source1, source2)\r\n \r\n print()\r\n \r\n new2 = TextModel('JK Rowling')\r\n new2.add_file('hp.txt')\r\n new2.classify(source1, source2)\r\n \r\n print()\r\n \r\n new3 = TextModel('Breitbart News Network')\r\n new3.add_file('bnn.txt')\r\n new3.classify(source1, source2)\r\n \r\n print()\r\n \r\n new4 = TextModel('Chaucer')\r\n new4.add_file('tct.txt')\r\n new4.classify(source1, source2)", "def run_tests():\n source1 = TextModel(\"Barack Obama\")\n source1.add_file('project/source_texts/barackobama_source_text.txt')\n\n source2 = TextModel('Donald Trump')\n source2.add_file('project/source_texts/donaldtrump_source_text.txt')\n\n new1 = TextModel('More Obama')\n new1.add_file('project/source_texts/moreobama_source_text.txt')\n new1.classify(source1, source2)\n\n new2 = TextModel('More Trump')\n new2.add_file('project/source_texts/moretrump_source_text.txt')\n new2.classify(source1, source2)\n\n new1 = TextModel('Gucci Gang by Lil Pump')\n new1.add_file('project/source_texts/guccigang_source_text.txt')\n new1.classify(source1, source2)\n\n new1 = TextModel(\"Spongebob Transcripts\")\n new1.add_file('project/source_texts/spongebobeps_source_text.txt')\n new1.classify(source1, source2)", "def main():\n myfactory = Faker()\n # database should be sorted lexographically and should not have any duplicate values\n # database = [\"abracadara\", \"al\", \"alice\", \"alicia\", \"allen\", 
\"alter\", \"altercation\", \"bob\", \"element\", \"ello\", \"eve\",\n # \"evening\", \"event\", \"eventually\", \"mallory\",\n database = sorted(list(set(myfactory.words(1000000000) +\n [\"za\", \"zazb\", \"zazc\", \"zazd\", \"zaze\", \"zazy\", \"zazz\", \"zb\", \"zba\", \"zbc\", \"zbd\", \"zbe\", \"zbz\"])))\n query = lambda prefix: [d for d in database if d.startswith(prefix)][:5]\n assert extract(query) == database", "def test_two_game(self):\n self.choice.side_effect = [\"ant\", \"baboon\"]\n self.input.side_effect = list(\"ant\" \"y\" \"babon\" \"n\")\n\n gallows.main()\n\n self.xprint.assert_any_call('Yes! The secret word is \"ant\"! '\n 'You have won!')\n self.xprint.assert_any_call('Yes! The secret word is \"baboon\"! '\n 'You have won!')", "def yes_straw_warts():\n check50.run(\"python3 palindrome.py\"\n ).stdout(\"Word? \", regex=False\n ).stdin(\"straw warts\", prompt=False\n ).stdout(\"YES\", regex=False\n ).exit()", "def test_text_classifier_vaporise(self):\n pass", "def run_examples():\n\n for example in examples:\n\n print(str(example) + \" : \", end=\" \")\n try:\n t, smush = analyse(example, my_env)\n print(lookup(t, smush))\n # print(\"Smush\")\n # for k,v in smush.items():\n # print(f\"\\t{k} : {v}\")\n except (ParseError, InferenceError) as e:\n print(e)", "def test():\n listpost,listclass = bayes.loaddataset()\n myvocablist = bayes.createlist(listpost)\n tmatrix = list()\n for doc in listpost:\n\t vec = bayes.word2vec(myvocablist,doc)\n\t tmatrix.append(vec)\n p0,p1,pa = bayes.train(tmatrix,listclass)\n testdoc1 = ['love','my','dalmation']\n testvec1 = bayes.word2vec(myvocablist,testdoc1)\n print testdoc1,'classify as :',bayes.classify(testvec1,p0,p1,pa)\n testdoc2 = ['stupid','love']\n testvec2 = bayes.word2vec(myvocablist,testdoc2)\n print testdoc2,'classify as :',bayes.classify(testvec2,p0,p1,pa)", "def test__extract_features(self):\n text_sample = \"I really really love this movie\"\n feature_sample = ['really','love','good']\n feature_score_type = \"presence\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':1,'love':1,'good':0})\n feature_score_type = \"term_frequency\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':2,'love':1,'good':0})", "def run_tests():\n source1 = TextModel('hilary_speaches')\n source1.add_file('hilary_source_text.txt')\n\n source2 = TextModel('bernie_speaches')\n source2.add_file('bernie_source_text.txt')\n\n new1 = TextModel('trump_speach')\n new1.add_file('trump_text.txt')\n new1.classify(source1, source2)\n\n new2 = TextModel('hilary_test')\n new2.add_file('hilary_test.txt')\n new2.classify(source1, source2)\n\n new3 = TextModel('bernie_test')\n new3.add_file('bernie_test.txt')\n new3.classify(source1, source2)\n\n new4 = TextModel('bill_clinton_test')\n new4.add_file('bill_clinton_source.txt')\n new4.classify(source1, source2)", "def main():\n # run_test_go_straight_inches()\n # run_test_turn_degrees()\n # run_test_spin_degrees()\n beep_if_blob_is_bigger_than(3000)", "def example_single(args, model, word2idx):\n #在命令行中加载和分段<目标、(推特内容)>配对\n while True:\n target = raw_input(\"问题: \")\n tweet = raw_input(\"回答: \")\n targets = [str(target)]\n tweets = [str(tweet)]\n seged_tweets = yutils.seg_sentence(tweets, choice=\"list\", place=\"hpc\") # may use lexicon here\n seged_targets = yutils.seg_sentence(targets, 
choice=\"list\", place=\"hpc\")\n predictions = evaluate(args, model, word2idx, seged_tweets, seged_targets)\n print(\"预测结果: \", predictions)", "def main():\n # Load and prep training files\n raw_speech_text = hg.load_training_file('trump_train.txt')\n speech_text = hg.prep_training(raw_speech_text)\n tweet_data = load_tweets('trump_tweets.json')\n raw_tweets = \"\"\n for dct in tweet_data:\n raw_tweets += \"{} \".format(dct['text'])\n tweets = hg.prep_training(raw_tweets)\n corpus = speech_text + tweets\n corpus = strip_punctuation(corpus)\n dict_1 = hg.map_one_to_one(corpus)\n dict_2 = hg.map_two_to_one(corpus)\n text = []\n \n # Introduction\n print(\"\\nTrump Speech Generator\\n\")\n print(\"Select words to add to speech\")\n print(\"\\'x\\' to exit\")\n print(\"\\'p\\' to add punctuation\")\n print(\"Select \\'p\\' before selecting the word you want to punctuate\")\n\n # Select first word\n options = corpus\n print ()\n selection = select_word(corpus)\n text.append(selection)\n \n # Select second word\n last = text[0]\n options = word_after_one(last, dict_1)\n print_text(text)\n selection = select_word(options)\n text.append(selection)\n \n # Select subsequent word\n while True:\n last = \"{} {}\".format(text[-2].strip(punctuation),\n text[-1].strip(punctuation))\n options = word_after_two(last, dict_2)\n if options == []:\n last = last.split()[1]\n options = word_after_one(last, dict_1)\n while options == []:\n last = random.choice(corpus)\n options = word_after_one(last, dict_1)\n print_text(text)\n selection = select_word(options)\n text.append(selection)\n \n print_text(text)", "def test_example():\n example_text = ['''Mark and Jack welcome back to couch on crackerjacks today I'm gonna show you how to make a basic and delicious potato salad some people might call this a country style potato salad some people might refer to it as a deli style of potato salad either way it's got the perfect balance of sweet and tangy from the sugar and the vinegar and pickles and everything else that's in this it's just your basic homemade potato salad you can add any number of things to this to make it your own but I'm just going to show you how I like to make mine so without further ado let's get started so naturally I'm going to start out with my potatoes every potato salad starts with potatoes for this recipe and for my potato salad I prefer using just regular old russet potatoes they're the cheapest they're the best I've tried using Yukon Gold potatoes and red potatoes for this recipe I prefer hands down at the russet potatoes it just it makes the best potato salad for me you can use whatever kind of potatoes you like though and using a potato peeler I'm just going to peel these potatoes a little trick for you that little end on most potato peelers it's kind of rounded use that to dig out the eyes of your potato it's what I've always used it for so it's just the perfect little tool to dig out the eyes of a potato but what you want to do is just go ahead and peel your potatoes and you don't have to peel your potatoes if you don't want to if you like skin on potato salad by all means go ahead and leave the skin on it doesn't make any difference personal preference and as you're peeling your potatoes and you get one done go ahead and put them into a large pot this is going to be the same profit I cut these in that's filled up with water you want to make sure and keep your potatoes covered that will prevent your potatoes from oxidizing and turning that pinky brown color but you just want to go 
through and peel all of your potatoes and I am using three pounds of potatoes for this recipe now once you get all your potatoes peeled you want to go ahead and cut them up basically you want to cut these into about 3/4 inch square pieces so for these medium potatoes I cut them half I turn them 90 degrees cut them into three pea is if you will that way if it's a larger potato do four and then cut those into chunks basically like I said you want about three quarters of an inch by three quarters of an inch by three quarters of an inch pieces and then again throw your potatoes back into the water that you pulled the potatoes out of that way they do not oxidize on you now when you get all your potatoes cut up your water is going to be cloudy and it's gonna be murky and it's gonna be just full of all the starch coming off of those potatoes what you want to do is rinse your potatoes well you want to make sure that the water coming off of that is completely clear go ahead and rinse these a good three or four times and then drain them completely you want to make sure that all of that starch gets off of those potatoes then you want to go ahead and light your stove and take your pot and you want a large pot for this put it over a medium-high heat time actually even high heat or at this point take your drained potatoes and put those into your pot and you want to add enough cold water to this to come up about one inch over the top of the potatoes starting off with cool water your potatoes cook evenly as the water comes up to temperature your potatoes come up with them to temperature if you start out putting cold potatoes into boiling water the outside of the potato is gonna be mush before the inside is actually cooked and before this gets going too far I'm gonna take two large eggs and I'm gonna put those in the water with the potatoes this recipe uses hard-boiled eggs and since I'm boiling the potatoes anyway I might as well just boil the eggs right along with the potatoes so just go ahead and add two large eggs to the pot and you want to cover your pot and you want to bring this up to a boil now once your water is that a boy I'll go ahead and give your potatoes an egg a gentle stir you want to be careful with this because you don't do not want to break your eggs and you also don't want to break up the potatoes but once this comes up to a boil you want to boil this for exactly ten minutes and how to check to make sure that your potatoes are done you want to take a couple large pieces take them out put them on a spoon and using a fork you want to put the fork into the potato and you want just a little bit of give in your potatoes before they break apart if you can see there it's just the slightest little bit of give before the potato breaks up you don't want to cook these any longer than that because they they will finish cooking when you take them off heat but you want to go ahead and drain these in a colander and once they are drained well go ahead and pour your potatoes and eggs back into the pot that you cooked them in and here you can dig out your eggs and you want to put your eggs in a bowl of cold water you want to stop that cooking process as soon as possible because if you cook your eggs too long you're gonna get that dreaded green ring around the yolk go ahead and put those in a bowl of cold water to stop the cooking process immediately and then you want to keep your potatoes in the pot that you cook them in to cool and you want to cool them completely before you do anything else with them 
if you add a salad dressing to hot potatoes it's gonna break on you and you don't want that so just go ahead and let your potatoes steam off and cool and I'm gonna let these sit for about a half an hour before I even start making the dressing for my potato salad and while you're waiting for your potatoes to cool off you can go ahead and peel your eggs it helps to wait a little bit for your eggs to cool down before you peel them just go ahead and crack them on a countertop and then start peeling them if you peel them underneath water or running water they peel super easy so as you can see here's I mean it takes nothing to do it under water water gets under there and the shell just slips off I just go ahead and peel your egg eggs and set them off until later I'm gonna need a few vegetables for my dressing I went ahead and already cut up half of a yellow onion here off a video I thought I was recording when I wasn't you don't need to see me chopped onions anyway everybody knows how to do that I've also got two stalks of celery here I'm just going to cut the ends off as well as the tops if you want to save the tops they make a nice garnish you don't have to keep them and I'm not gonna keep them here the celery I'm going to cut these sticks or stalks into orders and then I'm going to chop those up because I don't like really big chunks of celery in my potato salad so I'm just gonna cut these into four slices and then turn them around and cut these into dices if you will and I'm just going to go ahead after I get that died and set those off to the side until I need them later now for our dressing in a large bowl and you want to make sure that you use a plenty large bowl for this because it does make a lot of potato salad I've got one and a half cups of mayonnaise this recipe really does not work with Miracle Whip so since we're gonna be adding sugar to this stick to the plain old mayonnaise I'm gonna throw my eggs in there and using the back of a fork I'm just gonna break up my eggs if you like big chunks of egg in your potato salad don't mash it up as much but I'm gonna mash this up pretty fine and then you want to add in a quarter of a cup of sugar as well as a teaspoon and a half of salt it seems like a lot of salt it really isn't because there are a lot of potatoes here two teaspoons of white vinegar just plain white distilled vinegar then you want to add two tablespoons of sweet pickle relish you could also use dill pickle relish if you wanted to I like sweet in mine and finally I'm gonna add in two teaspoons of prepared yellow mustard if you like a more mustardy potato salad you can add more mustard if you want to this perfectly acceptable and then using a spoon or a fork whatever just go ahead and mix this up well and then you want to add in your onions and celery and go ahead and get that mixed in and you want to make sure to mix all of your ingredients and get your dressing thoroughly mixed before you add the potatoes because you don't want to over mix this once you get your potatoes added so go ahead and take your cooled potatoes again make sure that they are at least room temperature you do not want them warm or hot at all but go ahead and add those into your bowl and then using a spatula I'm going to gently fold the dressing into my potatoes you want your potatoes to remain as in this large of chunks as possible so don't go crazy you know stirring it stirring stirring you want to gently fold this so your potatoes do stay as whole as possible and a little secret for you just to bind 
up the dressing just a little bit I'm going to add two tablespoons of instant mashed potato flakes into the finished mixture I'm just going to fold this in basically what those do the potato flakes they bind up the dressing and make the dressing firm it also helps it kind of stick to the potatoes a little bit better so you you know the dressing doesn't run off of the potatoes which can be a problem with some recipes so there you go you want to make sure that those potato flakes are evenly distributed in there and everything is well mixed together everything is combined perfectly go ahead and give this a taste make sure that the salt is ok for you if you need a little bit more salt go ahead and add it if you want to if you need more mustard or vinegar or eggs whatever now is the time to do it but you want to go ahead and cover this with a piece of cling wrap saran wrap and refrigerate this for at least four to six hours before you serve this the longer you let this sit the better it gets but there you go there's your basic all-around simple homemade deli style or country style potato salad definitely give this recipe a try if you do let me know how you like it down below in the comment section if you like this video be sure to give it a thumbs up I would greatly appreciate it subscribe for more deliciousness and to keep up to date on all my latest videos thanks so much for watching and we will see you next time''']\n\n return str(example_text)", "def test_text_classifier_get_testing_samples(self):\n pass", "def test():\n source1 = TextModel('source1')\n source1.add_string('It is interesting that she is interested.')\n source2 = TextModel('source2')\n source2.add_string('I am very, very excited about this!')\n \n\n mystery = TextModel('mystery')\n mystery.add_string('Is he interested? No, but I am.')\n mystery.classify(source1, source2)", "def test_text_classifier_add_testing_samples(self):\n pass", "def main():\n data = pd.read_csv('./house-votes-84.data', header = None)\n\n class_names = [\"republican\", \"democrat\"]\n\n print(\"\\n-- Train and Test with Winnow --\\n\")\n train_and_test_with_winnow(data, class_names)\n\n print(\"\\n-- Train and Test with Naive Bayes --\\n\")\n train_and_test_with_naive_bayes(data, class_names)" ]
[ "0.64361274", "0.63639456", "0.63639456", "0.61933094", "0.5985079", "0.59168476", "0.58774954", "0.58620715", "0.58588994", "0.5743201", "0.57326597", "0.57213676", "0.5714051", "0.57126486", "0.5685293", "0.56775856", "0.56722903", "0.56616175", "0.56303006", "0.5619225", "0.56074715", "0.5596039", "0.5593733", "0.5568163", "0.5562195", "0.55598956", "0.55322343", "0.55266786", "0.5526038", "0.5520054" ]
0.7582199
0
Load parsed beautifulsoup object holding the full html
def load_parsed(self): with open(self.fname) as f: self.parsed = BeautifulSoup(f.read(), features="html.parser")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_website(self):\n# r = urllib.request.urlopen(self.url).read()\n r = requests.get(self.url).content \n self.soup = bs(r, \"lxml\")", "def load_page(self) -> bs4.BeautifulSoup:\n\n res = requests.get(self.url)\n\n res.raise_for_status()\n return bs4.BeautifulSoup(res.text, 'html.parser')", "def parse_source(html, encoding='utf-8'):\n return BeautifulSoup(html, from_encoding=encoding)", "def update_html(self):\n self.html = self.driver.page_source\n self.soup = BeautifulSoup(self.html, features=\"lxml\")", "def make_soup(self):\n req = urllib.request.Request(\n url,\n data=None,\n headers={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'\n }\n )\n f = urllib.request.urlopen(self.html)\n soupdata = BeautifulSoup(f, \"html.parser\")\n return soupdata", "def parsed_html():\n return utils.parse_html(\n \"\"\"\n <!doctype hmtl>\n <html>\n <head>\n <meta charset=\"utf-8\">\n <meta name=\"viewport\" content=\"width=device-width\">\n <title>Page title</title>\n <link rel=\"stylesheet\" href=\"/static/styles.css\" />\n </head>\n <body>\n <h1>Django Auto AMP</h1>\n <p>Generate automatic AMP from your Django templates</p>\n <img src=\"/static/img.jpg\" width=\"500\" height=\"300\" />\n <img src=\"/static/img.gif\" layout=\"nodisplay\" />\n <img src=\"/static/img.png\" />\n <script type=\"text/javascript\" src=\"/static/scripts.js\" />\n <script type=\"application/json\" src=\"/static/data.json\" />\n </body>\n </html>\n \"\"\"\n )", "def load_data(url: str):\n\n page = requests.get(url=url)\n soup = BeautifulSoup(page.content, 'html.parser')\n return soup", "def make_file_soup(self):\n soup = BeautifulSoup(self.html, 'html.parser')\n return soup", "def _soup(self, url):\n r = self.session.get(url)\n r.raise_for_status()\n html = Soup(r.text, 'lxml') # lxml is fastert than html.parser\n r.close()\n return html", "def _extract_html(self, url):\n self.response = requests.get(url, timeout=5)\n self.html = BeautifulSoup(self.response.content, \"lxml\") if self.response.ok else None\n # return self.html", "def get_soup(self, html):\n if html is not None:\n soup = BeautifulSoup(html, \"html.parser\")\n return soup\n else:\n return", "def getHTML(self):\n html = requests.get(self.URL).text\n soup = BeautifulSoup(html, \"lxml\")\n return soup", "def parse(html, encoding='utf-8'):\n if isinstance(html, unicode):\n return bs4.BeautifulSoup(html, 'html.parser')\n\n return bs4.BeautifulSoup(html, 'html.parser', from_encoding=encoding)", "def _get_soup_object(url: str) -> bs4.BeautifulSoup:\n request_result=requests.get(url)\n soup = bs4.BeautifulSoup(request_result.text, \"html.parser\")\n return soup", "def get_html_parser(url):\n response = requests.get(url)\n return BeautifulSoup(response.content, 'html.parser')", "def get_content(self):\n response = requests.get(self.url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n return soup", "def soup(self):\n if not self._soup:\n resp = requests.get(self.url)\n if not resp.ok:\n logging.warning('Status of request is not ok.')\n self._soup = BeautifulSoup(resp.content, 'html.parser')\n\n return self._soup", "def parse_html_with_bs4(html_src):\n try:\n BeautifulSoup(html_src, 'html.parser')\n except Exception as exc:\n print exc, traceback.format_exc()\n pass", "def load_data(self):\n with open(self.FILE, 'r') as html_file:\n document = html_file.read()\n self.HTML = document", "def get_html_content(self, url):\n\n req = urllib2.Request(url, 
headers=self.HEADER)\n page = urllib2.urlopen(req)\n soup = BeautifulSoup(page)\n\n return soup", "def bs4(self):\n\n if self._soup is None:\n self._soup = bs4.BeautifulSoup(self.raw_html, 'html.parser')\n return self._soup", "def from_html(self, content):\r\n pass", "def get_soup(url):\n\tr = requests.get(url)\n\tdata = r.text\n\tsoup = BeautifulSoup(data, \"lxml\")\n\treturn soup", "def _html(url: str) -> BeautifulSoup:\n with urllib3.PoolManager() as manager:\n res = manager.request(\"GET\", url, headers={\"User-Agent\": ua.chrome})\n if res.status != 200:\n raise Exception(res.status)\n soup = BeautifulSoup(res.data, \"html.parser\")\n return soup", "def get_soup(url):\n\tresponse = urlopen(url)\n\thtml = response.read()\n\tsoup = BeautifulSoup(html, \"html.parser\")\n\tresponse.close()\n\treturn soup", "def getSoup(url):\n return BeautifulSoup(getHtml(url), 'lxml')", "def make_soup(url):\r\n htmlFile = urllib.request.urlopen(url).read()\r\n soup = BeautifulSoup(htmlFile)\r\n return soup", "def soup(self) -> Soup:\n return Soup(self.html)", "def getHTML(url):\n\n time.sleep(2.00)\n html = urllib2.urlopen(url,timeout=10).read()\n urllib2.urlopen(url).close()\n\n soup = BeautifulSoup(html)\n\n return soup", "def get_soup(url):\n return BeautifulSoup(requests.get(url).content, 'lxml')" ]
[ "0.7414255", "0.7408412", "0.7246797", "0.7061659", "0.6892907", "0.68442833", "0.6834316", "0.6795834", "0.6783969", "0.67211264", "0.67171395", "0.6716718", "0.66845864", "0.6660223", "0.660617", "0.65939754", "0.6587271", "0.6559653", "0.65470797", "0.6519594", "0.65095526", "0.64791566", "0.64658093", "0.64374965", "0.6401383", "0.639497", "0.6391123", "0.6386812", "0.63863516", "0.6374355" ]
0.7917264
0
Iterator over maintext paragraph elements; this includes footnotes.
def _paragraphs_raw(self): for par in self.parsed.find_all("p")[self.PAR_START:]: yield par
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def linked_text_paragraphs(self):\n for par in self._main_paragraphs_raw():\n par_links = par.find_all('a')\n if len(par_links) == 0:\n self.main_count += len(par.text)\n yield par.text\n else:\n for el in par.contents:\n if el.name is None:\n #this is plain text\n self.main_count += len(str(el))\n yield str(el)\n elif el.name == \"a\" and \"href\" in el.attrs:\n id = el[\"href\"].lstrip('#')\n try:\n foot_par = self._get_footnote_par(id)\n except NoFootnoteError:\n self.log(f\"Could not find footnote for {id}, skipping.\")\n self.footnote_count += len(foot_par.text)\n yield foot_par.text", "def iter_main_text(self, element):\n if element.tag == 'note':\n return\n if element.text:\n yield element.text\n for e in element:\n for se in self.iter_main_text(e):\n yield se\n if e.tail:\n yield e.tail", "def testParagraphs(self):\n\n textractor = Textractor(paragraphs=True)\n\n # Extract text as sentences\n paragraphs = textractor(Utils.PATH + \"/article.pdf\")\n\n # Check number of paragraphs is as expected\n self.assertEqual(len(paragraphs), 13)", "def paragraph(self, text):\n return [text]", "def extract_paragraph(file_name, url_text = None, show_property = False, database = None, extract_all_property=False, \n return_documenTM = False, cut_off = True, unit_dict = None, special_unit_dictionary = None):\n if not url_text:\n url_text = file_name\n \n if not database: \n database = {}\n \n if not isinstance(unit_dict, dict):\n unit_dict = unit_dict_default\n \n keyword_dict = make_keyword_dict(unit_dict)\n \n Q = DocumentTM(file_name, **database)\n Q.doc()\n Q.find_strange()\n chemical_type_dict = {}\n database = Q.database()\n \n if special_unit_dictionary:\n Q.set_special_unit(special_unit_dictionary)\n \n \n data_collection = []\n json_list = []\n \n for Para in Q.Para:\n new_split, unit = Q.tokenize_paragraph(Para, lemma = False, Strange = True, cut_off=cut_off)\n \n if not new_split:\n continue\n \n #print (new_split)\n \n before_represent_chem = False\n \n for sent in cut_paragraph(new_split):\n new_sent, unit_dictionary, next_represent_chem = matching_algorithm(sent, database, chemical_type_dict, before_represent_chem)\n\n if extract_all_property:\n #iters = chain.from_iterable(unit_dictionary.values())\n iters = chain.from_iterable([dics.values() for dics in unit_dictionary.values()])\n else:\n iters = unit_dictionary['Character'].values()\n \n \n #print (unit_dictionary['Character'])\n #if unit_dictionary['Character'] or unit_dictionary['Reaction']:\n #data_collection.append([sent, unit_dictionary])\n \n if show_property and (unit_dictionary['Character'] or unit_dictionary['Reaction']):\n \n print (\"\\n\\n------------------------------------\")\n print (file_name)\n print (\" \".join([str(t) for t in new_sent]))\n print (\"\\n\")\n #print (Para)\n #print (\" \".join(new_split))\n print (\"------------------------------------\")\n \n for T in chain.from_iterable(iters):\n #for T in t:\n dictionary_chemical = {'Material':T.target, 'Value':T.value, 'Unit':T.unit, 'Condition':T.condition, 'Property':T.prop,\n 'Reference':str(file_name)}\n \n json_list.append(dictionary_chemical)\n\n if show_property:\n print (\"value:\", T, \"condition:\", T.condition, \"chemical:\", T.target)\n \n if isinstance(next_represent_chem, Chemical) or not next_represent_chem:\n before_represent_chem = next_represent_chem \n \n if return_documenTM:\n return json_list, Q\n \n return json_list", "def extract_paragraph_test(file_name, url_text = None, show_property = False, database = None, 
extract_all_property=False, \n return_documenTM = False, cut_off = True, unit_dict = None):\n if not url_text:\n url_text = file_name\n \n if not database: \n database = {}\n \n if not isinstance(unit_dict, dict):\n unit_dict = unit_dict_default\n \n keyword_dict = make_keyword_dict(unit_dict)\n \n Q = DocumentTM(file_name, **database)\n Q.doc(parser = 'cde_parser')\n Q.find_strange()\n chemical_type_dict = {}\n database = Q.database()\n \n data_collection = []\n json_list = []\n \n for Para in Q.Para:\n new_split, unit = Q.tokenize_test(Para, lemma = False, Strange = True, cut_off=cut_off)\n \n if not new_split:\n continue\n \n #print (new_split)\n \n before_represent_chem = False\n \n for sent in cut_paragraph(new_split):\n new_sent, unit_dictionary, next_represent_chem = matching_algorithm(sent, database, chemical_type_dict, before_represent_chem)\n\n if extract_all_property:\n #iters = chain.from_iterable(unit_dictionary.values())\n iters = chain.from_iterable([dics.values() for dics in unit_dictionary.values()])\n else:\n iters = unit_dictionary['Character'].values()\n \n \n #print (unit_dictionary['Character'])\n #if unit_dictionary['Character'] or unit_dictionary['Reaction']:\n #data_collection.append([sent, unit_dictionary])\n \n if show_property and (unit_dictionary['Character'] or unit_dictionary['Reaction']):\n \n print (\"\\n\\n------------------------------------\")\n print (file_name)\n print (\" \".join([str(t) for t in new_sent]))\n print (\"\\n\")\n #print (Para)\n #print (\" \".join(new_split))\n print (\"------------------------------------\")\n \n for T in chain.from_iterable(iters):\n #for T in t:\n dictionary_chemical = {'Material':T.target, 'Value':T.value, 'Unit':T.unit, 'Condition':T.condition, 'Property':T.prop,\n 'Reference':str(file_name)}\n \n json_list.append(dictionary_chemical)\n\n if show_property:\n print (\"value:\", T, \"condition:\", T.condition, \"chemical:\", T.target)\n \n if isinstance(next_represent_chem, Chemical) or not next_represent_chem:\n before_represent_chem = next_represent_chem \n \n if return_documenTM:\n return json_list, Q\n \n return json_list", "def get_paragraphs(self, batch=None):\n\t\t\n\t\t# loop through the document stream for this document database\n\t\tfor document in self.get_documents(batch):\n\t\t\tfor paragraph in document[\"paragraphs\"]:\n\t\t\t\t# yield the paragraphs one by one\n\t\t\t\tyield paragraph", "def paragraphs(self, data=True):\n return self.nodes(self.max_depth, data)", "def end_paragraph(self):\n raise NotImplementedError", "def get_paragraphs():\n soup = get_html()\n paragraphs = []\n for i in soup.findAll('div', {'class': 'faq-list1__hide'}):\n p = str(i.get_text().strip())\n paragraphs.append(p)\n return paragraphs", "def __iter__(self):\r\n for text in self.get_texts():\r\n yield self.dictionary.doc2bow(text, allow_update=False)", "def paragraphs(iterable, splitter):\n assert isinstance(splitter, (tuple, list))\n splitter = tuple(splitter)\n paragraph = []\n for line in iterable:\n if line.startswith(splitter):\n if paragraph:\n yield paragraph\n paragraph = [line]\n else:\n paragraph.append(line)\n if paragraph:\n yield paragraph", "def generate_new_book(text):\n\n for paragraph in text:\n for sentence in paragraph:\n for word in sentence:\n print(word, end=' ')\n print()\n print()", "def paragraph(self, on, **kw):\n if self._terse:\n return ''\n FormatterBase.paragraph(self, on)\n tag = 'p'\n if on:\n tagstr = self._open(tag, **kw)\n else:\n tagstr = self._close(tag)\n return tagstr", "def 
generate_paragraphs(self):\n def dig(hr_tag, end_index):\n paragraphs = []\n for tag in hr_tag.children:\n if tag.name == 'hr':\n return paragraphs + dig(tag, end_index)\n text = (str(tag)\n if isinstance(tag, NavigableString)\n else tag.get_text())\n if '$' in text and not tag.find('table'):\n start_index = document_txt.index(text[:search_chars])\n end_index = start_index + len(text)\n paragraphs.append({\n 'text': text,\n 'start': start_index,\n 'end': end_index\n })\n return paragraphs\n\n with open('document.txt', 'rb') as f1:\n document_txt = f1.read().decode()\n search_chars = 20\n paragraphs = dig(self.soup.find('body'), 0)\n paragraphs = sorted(paragraphs, key=lambda x: x['start'])\n with open('paragraphs.txt', 'wb') as f2:\n f2.write(json.dumps(paragraphs, indent=2, sort_keys=True).encode())", "def _get_text(self, element):\n # for text in element.itertext():\n for text in self.iter_main_text(element):\n yield text.strip()", "def paragraphs(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}')+1)]\n number_of_paragraphs = len(list(root.iter(root_tag + 'p')))\n return number_of_paragraphs", "def HTMLparser(self):\n soup = self.getHTML()\n \n # Sort through all the text in the html:\n for text in soup.find_all('p'):\n try:\n paragraphNo = int(text.parent.p['id'][14:])\n \n # Only grab paragraphs in \"On the Social Contract\"\n if paragraphNo < self.START_PARAGRAPH or paragraphNo > self.END_PARAGRAPH:\n continue\n \n elif text.string:\n \n # Ignore those \"paragraphs\" in the html that simply outline different chapters/books\n if re.search('^(CHAPTER|BOOK)(.*):', text.string):\n continue\n \n else:\n \n # Want to read in the document by sentence (for RousseauBot to use individually later on)\n tempList = re.split('(?<!etc)\\.\\s(?!.*\\\")|\\!', text.string)\n for sentence in tempList:\n \n # When a \"paragraph\" is just a single sentence, re's .split() returns the sentence and a ''\n # Also, remove overly long quotes - Twitter has char limit\n if sentence != '' and len(sentence.strip()) < self.TWITTER_LIMIT:\n self.quotes.append(sentence.strip())\n \n except KeyError:\n \n # BS throws KeyError when <p>'s id field is blank; ignore - all paragraphs I need has an id\n continue", "def _process_layout(self, layout):\r\n # Here we just group text into paragraphs\r\n elements = []\r\n for lt_obj in layout:\r\n if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):\r\n elements.append(Paragraph(lt_obj.get_text().strip()))\r\n elif isinstance(lt_obj, LTFigure):\r\n # Recursive...\r\n elements.extend(self._process_layout(lt_obj))\r\n return elements", "def _visit_paragraph(self,elem):\n # only add this p if we don't already have a descriptor for the site\n if self._curr_url not in self._url_paragraphs:\n try:\n paragraph_text = self._text_of_para(elem).strip()\n paragraph_text = strip_tags(paragraph_text)\n paragraph_text = (paragraph_text[:1001] + '...') if len(paragraph_text) > 1000 else paragraph_text\n self._url_paragraphs[self._curr_url] = paragraph_text\n print \"description of url:\" + repr(paragraph_text)\n except:\n print \"Failed to get paragraph text\"", "def _process_layout(self, layout):\n # Here we just group text into paragraphs\n elements = []\n for lt_obj in layout:\n if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):\n elements.append(Paragraph(lt_obj.get_text().strip()))\n elif isinstance(lt_obj, LTFigure):\n # Recursive...\n elements.extend(self._process_layout(lt_obj))\n 
return elements", "def read(self, paragraph_idx=None):\n if paragraph_idx:\n self.paragraphs[paragraph_idx].read()\n else:\n for paragraph in self.paragraphs:\n paragraph.read()", "def find_text_in_p(self, el):\n\n all = []\n for el in el.findall(\".//p\"):\n t = el.text_content().strip()\n if len(t)<40:\n continue\n all.append(t)\n\n return \" \".join(all)", "def getRtf(self):\n self.pieces = []\n for node in self.root.findall(\"MiscellaneousDocumentText\"):\n for child in node:\n if child.tag == \"Para\":\n self.__addPara(child)\n elif child.tag in (\"ItemizedList\", \"OrderedList\"):\n self.__addList(child, child.tag)\n return \"\".join(self.pieces)", "def split_paragraphs(block):\n # Break block contents into paragraphs by blank lines.\n def gen(block):\n par = []\n for obj in block:\n if isinstance(obj, Text) and obj.empty:\n # New paragraph.\n yield par\n par = []\n else:\n par.append(obj)\n yield par\n\n # Combine paragraphs. \n def finish(pars):\n for par in pars:\n if len(par) == 0:\n continue\n elif any( isinstance(o, Text) for o in par ):\n # Paragraph contains text. Use a P element.\n yield Block(par, tag='P')\n else:\n # Doesn't contain text; don't wrap it.\n yield from par\n\n block[:] = finish(gen(block))", "def _convert(self):\n root = cElementTree.fromstring(self.html)\n for el in root.getiterator():\n if el in self.visited:\n continue\n self.visited.update([el])\n if el.tag == 'p':\n parser = ParagraphParser(el)\n self.document_state.append(parser.tag)\n self.visited.update(el.getiterator())", "def generate_paragraphs(self, count=3):\n\n with self.open_text_data() as f:\n result = self.read_paragraphs(f, count=count)\n return result", "def __yahoo_parse_text(self, content):\n text = ''\n # Process all paragraphs.\n paragraphs = content.find_all('p')\n for par in paragraphs:\n text += '<p>' + par.getText(separator=' ') + '</p>'\n # Remove all extra whitespace (single space remains).\n text = ' '.join(text.strip().split())\n # Result\n return text", "def is_footnote_text(self, par):\n return (par is not None) and (\"foot\" in par.attrs.get(\"class\", []))", "def extract_sentences(paper_path, para_yes):\n\n f = open(paper_path, 'rb')\n doc = Document.from_file(f, readers=[HtmlReader()])\n\n sen_yes_arr = list()\n sen_no_arr = list()\n\n elem_all = np.arange(0,len(doc))\n para_no = np.delete(elem_all, para_yes)\n\n for i in para_no:\n if type(doc.elements[i]) == chemdataextractor.doc.text.Paragraph:\n for sentence in doc.elements[i]:\n sen_no_arr.append(sentence)\n\n for i in para_yes:\n if type(doc.elements[i]) == chemdataextractor.doc.text.Paragraph:\n for sentence in doc.elements[i]:\n sen_yes_arr.append(sentence)\n\n\n return sen_yes_arr, sen_no_arr" ]
[ "0.70568925", "0.6538393", "0.630119", "0.6105424", "0.59029144", "0.5857844", "0.56422466", "0.561061", "0.5597017", "0.5584193", "0.55805284", "0.55002755", "0.5490439", "0.5486235", "0.5449823", "0.5444187", "0.5435644", "0.5427534", "0.5414275", "0.54038227", "0.5385818", "0.5359002", "0.5344678", "0.53430986", "0.53422266", "0.5325232", "0.5321369", "0.53143275", "0.5266145", "0.5233419" ]
0.6565161
1
Checks whether an element contains footnote text.
def is_footnote_text(self, par): return (par is not None) and ("foot" in par.attrs.get("class", []))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_footnote(self, par):\n if par.find_next_sibling('p') is None:\n return False\n return self.is_footnote_text(par) or self.is_footnote_link(par)", "def is_footnote_link(self, par):\n return self.is_footnote_text(par.find_next_sibling('p'))", "def is_footnote(self):\n return self.style['float'] == 'footnote'", "def has_text(self, page: fitz.Page) -> bool:\n return page.get_text(clip=page.trimbox).strip() != \"\"", "def has_text(self):\n try:\n first = self.text_planets()[0]\n except IndexError:\n first = None\n\n return first is not None", "def test_two_footnotes(self):\n text = \"Footnote[^1]\\n\\n[^1]: Footnote text\"\n self.assertNotEqual(self.md(text), self.md(text))", "def is_plugin_note(self, note):\n return bool(self.regex.match(note))", "def assert_text_present(self, text, msg=None):\r\n e = driver.find_element_by_tag_name('body')\r\n assert text in e.text", "def is_plain_text(self):\n return self._tag == 'plain_text'", "def text_exists(self, text: str)-> bool:\n result = self.__content.find(text)\n if result == -1:\n return False\n else:\n return True", "def is_text( self ):\n return self.get_main_type() == 'text'", "def ends_paragraph(s: str) -> bool:\n return not s.strip()", "def has_textframe(self):\n return _child(self._element, 'p:txBody') is not None", "def is_ends_with_tag(text):\n\treturn re_tag.search(text) != None", "def check_marked_paragraph(paragraph, number):\n\n\tq = 0 # счетчик найденных маркеров\n\tchars = '<> ' # возможные символы в каретке\n\n\tfor i in range(len(paragraph.runs)):\n\t\tif \"<>\" in paragraph.runs[i].text: # если в тексте каретки встречается маркер\n\t\t\tfor c in paragraph.runs[i].text: # проверяем каждый символ в каретке\n\t\t\t\tif c not in chars: # если он не входит в список разрешенных символов\n\t\t\t\t\treturn False\n\t\t\tq += 1 # если проверка пройдена, увеличиваем счетчик\n\t\telif \"<\" in paragraph.runs[i].text and \">\" in paragraph.runs[i+1].text: # если маркер разделен на две соседние каретки\n\t\t\tfor c in paragraph.runs[i].text: # проверяем каждую из кареток\n\t\t\t\tif c not in chars:\n\t\t\t\t\treturn False\n\t\t\tfor c in paragraph.runs[i+1].text:\n\t\t\t\tif c not in chars:\n\t\t\t\t\treturn False\n\t\t\tq += 1\n\n\tif q != number: # если количество маркеров не совпало с указанным в выводе\n\t\treturn False\n\telse:\n\t\treturn True", "def has_text(self, text, match_option=None):\n selector_text = UiSelector().attributes(\"text\", text, match_option)\n selector_content_desc = UiSelector().attributes(\"content-desc\", text,\n match_option)\n\n return UiObject(\n selector_text, self.android_device_driver).verify_exist() or UiObject(\n selector_content_desc, self.android_device_driver).verify_exist()", "def paragraph_mentions(text: str, keyword: str) -> bool:\n soup = BeautifulSoup(text, \"html5lib\")\n paragraphs = [p.get_text() for p in soup('p')]\n\n return any(keyword.lower() in paragraph.lower()\n for paragraph in paragraphs)", "def is_tagged_text(text):\n return len(text) > len(strip_tags(text))", "def assert_has_text(self, xml_root, xpath, text, exact=True):\r\n element_list = xml_root.xpath(xpath)\r\n self.assertTrue(len(element_list) > 0,\r\n \"Could not find element at '%s'\" % str(xpath))\r\n\r\n if exact:\r\n self.assertEqual(text, element_list[0].text)\r\n else:\r\n self.assertIn(text, element_list[0].text)", "def is_important_text(node):\n\n return not (0 < len(node.get_text()) < TEXT_MIN_SCORE\n and node.name not in HEADING_TAGS)", "def has_template(page_text: str) -> bool:\n\n\tpattern = 
'<noinclude>.*{{documentation}}.*</noinclude>'\n\tif re.search(pattern, page_text, re.DOTALL | re.IGNORECASE):\n\t\treturn True\n\telse:\n\t\treturn False", "def hasContents():", "def findFootnotesPlaceholder(self, root):\n def finder(element):\n for child in element:\n if child.text:\n if child.text.find(self.getConfig(\"PLACE_MARKER\")) > -1:\n return child, element, True\n if child.tail:\n if child.tail.find(self.getConfig(\"PLACE_MARKER\")) > -1:\n return child, element, False\n finder(child)\n return None\n \n res = finder(root)\n return res", "def is_plugin_note(self, note):\n return False", "def _is_jupytext_file(ntbk):\n jupytext_meta = ntbk.get('metadata', {}).get('jupytext')\n if jupytext_meta is None:\n return False\n else:\n return jupytext_meta.get('notebook_metadata_filter', '') != \"-all\"", "def is_text_exists(self, locator_type, locator, text):\n try:\n self.wait_for_text(locator_type, locator, text)\n return True\n except TimeoutException:\n return False", "def _is_text_tag(tag):\n return tag.name not in ['script', 'style']", "def is_resent(self):\n return self.unixtext.find(\"...RESENT\") > 0", "def is_tip(text):\n\n amount = 0\n if re.search(r'I sent you a \\$[0-9]*\\.00 tip ♥', text):\n amount = re.match(r'I sent you a \\$([0-9]*)\\.00 tip ♥', text).group(1)\n Settings.maybe_print(\"successfully found tip\")\n Settings.dev_print(\"amount: {}\".format(amount))\n return True, int(amount)\n elif re.search(r\"I\\'ve contributed \\$[0-9]*\\.00 to your Campaign\", text):\n amount = re.match(r'I\\'ve contributed \\$([0-9]*)\\.00 to your Campaign', text).group(1)\n Settings.maybe_print(\"successfully found campaign donation\")\n Settings.dev_print(\"amount: {}\".format(amount))\n return True, int(amount)\n return False, int(amount)", "def chunk_in_text(chunk, text):\n chunk = clean_chunk(chunk)\n return text.find(chunk) >= 0" ]
[ "0.72341335", "0.688278", "0.6498541", "0.6382326", "0.61090255", "0.6085547", "0.59022486", "0.5810465", "0.5732255", "0.5724639", "0.56633186", "0.55054045", "0.54965115", "0.5481354", "0.54613525", "0.5442589", "0.5437752", "0.54305816", "0.53904307", "0.5370518", "0.5368163", "0.53493077", "0.5339378", "0.5335499", "0.529686", "0.52964383", "0.5295664", "0.5281439", "0.5256145", "0.52471274" ]
0.80490977
0
Checks whether an element is a link adjacent to footnote text.
def is_footnote_link(self, par): return self.is_footnote_text(par.find_next_sibling('p'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_footnote(self, par):\n if par.find_next_sibling('p') is None:\n return False\n return self.is_footnote_text(par) or self.is_footnote_link(par)", "def is_link(s):\n return (len(s) == 2 and is_link(s[1])) or s == empty", "def check_link(self, link, links_para):\n href = link['href']\n if not href.startswith('/wiki/') or href == '/wiki/Latin' or href.startswith('#'):\n return False\n if \"<i>\" in link or href in links_para:\n return False\n title = href[6:]\n if title.startswith('Help:') or title.startswith('File:') or title.endswith('.ogg') or title.startswith('Wikipedia:'):\n return False\n return True", "def is_link(s):\n return s == empty or (len(s) == 2 and is_link(s[1]))", "def is_link(s):\n return s == empty or (len(s) == 2 and is_link(s[1]))", "def is_link(s):\n return s == empty or (len(s) == 2 and is_link(s[1]))", "def is_footnote_text(self, par):\n return (par is not None) and (\"foot\" in par.attrs.get(\"class\", []))", "def is_href_valid(self, link):\n url = str(link['href'])\n # if it doesn't lead to a wiki page\n if not url.startswith(\"/wiki/\"):\n return False\n\n wikipedia_classes = [\"external_text\", \"mw-disambig\", \"infobox-data\"]\n # if the href has a class\n if link.get(\"class\") is not None:\n link_class = \"_\".join(link.get(\"class\"))\n # if the class is an external text class, or a disambiguation link\n if any(wiki_class in link_class for wiki_class in wikipedia_classes):\n return False\n\n if 'wikimedia' in url or 'wiktionary' in url:\n return False\n wikipedia_keywords = [\"Help\", \"Category\", \"Wikipedia\", \"Template\", \"File\", \"Talk\", \"Special\", \"Portal\"]\n if any(keyword + ':' in url for keyword in wikipedia_keywords):\n return False\n if '#' in url:\n return False\n # if the page is a file\n if re.search(\"\\.[a-zA-Z][a-zA-Z][a-zA-Z]$\", url) or re.search(\"\\.[a-zA-Z][a-zA-Z][a-zA-Z][a-zA-Z]$\", url):\n return False\n\n # if the href is enclosed in brackets\n if WikiPage.is_substring_enclosed_in_brackets(link, link.parent.parent):\n return False\n\n wikipedia_not_needed_tags = ['small', 'sup', 'i']\n if link.parent.name in wikipedia_not_needed_tags:\n return False\n\n # if the href shows two different spellings. like in: https://en.wikipedia.org/wiki/Carbon_fibers\n # Carbon fibers ~or~ carbon fibres - here or is the href.\n\n if link.contents == [\"or\"]:\n return False\n\n parents_classes = [p.get(\"class\") for p in link.parents if p.get(\"class\") is not None]\n parents_classes = [str(\"_\".join(p)) for p in parents_classes]\n parents_ids = [p.get(\"id\") for p in link.parents if p.get(\"id\") is not None]\n\n # 'toc' - the Contents menu class\n # 'mw-editsection' - the Edit section\n # 'thumbcaption' - a Photo Caption\n # 'hlist' - a list like in: https://en.wikipedia.org/wiki/January\n wikipedia_classes_to_ignore = [\"thumbcaption\", \"infobox\", \"navigation-not-searchable\", \"sidebar\", \"box-text\",\n \"toc\", \"mw-editsection\", \"thumb\", \"hlist\", \"navbox\"]\n\n for p_class in parents_classes:\n\n if any(class_to_ignore in p_class for class_to_ignore in wikipedia_classes_to_ignore):\n return False\n\n # if it is a coordinates href\n if \"coordinates\" in parents_ids:\n return False\n\n '''\n Update 13.04.2021:\n ------------------\n Someone edited the \"Epistemology\" page. and changed the first link <a>branches<a/>.\n Instead of pointing to the page \"Branches of science\", it was changed to point to \"Outline of philosophy\".\n Which creates a loop. 
I chose to ignore it manually, and instead click on the next link.\n ( which happens to be Philosophy :) )\n This changed also caused some of the \"paths\" in the PDF files,\n generated before that date to be slightly outdated. But the concept stays the same :)\n \n Update 08.05.2021:\n ------------------\n they fixed it since :)\n \"Epistemology\" -> branches of philosophy : \"https://en.wikipedia.org/wiki/Outline_of_philosophy\" ->\n -> Philosophy.\n \n #if \"Outline_of_philosophy\" in url:\n # return False\n '''\n\n return True", "def islink(path):\n return get_instance(path).islink(path)", "def islink(self):\n return os.path.islink(self.path)", "def islink(self, path):\n return os.path.islink(path)", "def is_link(self, url):\n return not self.is_page(url)", "def is_footnote(self):\n return self.style['float'] == 'footnote'", "def test_link(self):\n\n markup = \"\"\"\n <div>\n <p>Some text <span id=\"1\" class=\"foo:bar:foobar\"> in a paragraph</span>.\n <a id=\"2\" class=\"bar\" href=\"http://google.com\">Link</a>\n <a id=\"3\">Placeholder text.</a>\n </p>\n </div>\n \"\"\"\n\n self.assert_selector(\n markup,\n \":link\",\n [\"2\"],\n flags=util.HTML5\n )\n\n self.assert_selector(\n markup,\n \"a:link\",\n [],\n flags=util.XML\n )", "def _IsLink(self, file_attribute_flags):\n if file_attribute_flags is None:\n return False\n return bool(\n file_attribute_flags & pyfsntfs.file_attribute_flags.REPARSE_POINT)", "def isPostLink(self, rel, type = None): #$NON-NLS-1$\r\n return self._isInRelList(rel, ZAtomRelTypes.ATOM_POST_LINK_REL_LIST)", "def check_for_url_in_text(self, string):\r\n has_link = False\r\n\r\n # Find all links in the string.\r\n links = re.findall(r'(https?://\\S+)', string)\r\n if len(links)>0:\r\n has_link = True\r\n\r\n # Autolink by wrapping links in anchor tags.\r\n for link in links:\r\n string = re.sub(link, self.generate_file_link_html_from_url(link, link), string)\r\n\r\n return has_link, string", "def check_link(self, link):\n false_links = [\"wikipedia:\", \"w:\", \"wikitionary:\", \"wikt:\", \"wikinews:\",\n \"n:\", \"wikibooks:\", \"b:\", \"wikiquote:\", \"q:\", \"wikisource:\",\n \"s:\", \"wikispecies:\", \"species:\", \"wikiversity\", \"v:\", \n \"wikivoyage:\", \"voy:\", \"wikimedia:\", \"foundation:\", \"wmf:\", \n \"commonds:\", \"c:\", \"chapter:\", \"metawikipedia:\", \"meta:\", \n \"m:\", \"incubator:\", \"outreach:\", \"mw:\", \"mediazilla:\", \n \"bugzilla:\", \"testwiki:\", \"wikitech:\", \"wikidata:\", \"d:\",\n \"phabricator:\", \"phab:\", \"talk:\", \"user talk:\", \"file:\", \n \"user:\", \"template:\", \"category:\", \"file talk:\", \n \"category talk:\", \"image:\", \"media:\", \"special:\", \n \"help:\", \"portal:\", \"portal talk:\", \"\\#\"]\n is_bad = any(false_link in link.lower() for false_link in false_links)\n if is_bad or link[0] == \":\":\n return False\n else:\n return True", "def test_link(self):\n comment = \"[link](http://foo.com)\"\n comment_md = Markdown().render(comment)\n self.assertEqual(comment_md, '<p><a rel=\"nofollow\" href=\"http://foo.com\">link</a></p>')", "def is_link(token):\n\n pattern = r'ht{2}p(s|)\\:\\/\\/(w{3}.|)[\\w]+\\.[\\w]+\\/[\\w\\d]+'\n return re.match(pattern, token)", "def hasEntityLink(self, link):\r\n return self.feed_handler.hasEntityLink(link)", "def check_paragraph(self, para, links_para):\n #Return False if no paragraphs found\n if para is None:\n return False\n\n links = para.find_all('a')\n #Return False if no links found\n if links is None:\n return False\n\n #Return True if one link is valid in 
the paragraph\n for link in links:\n if self.check_link(link, links_para):\n return True\n return False", "def isFeedLink(self, rel, type = None): #$NON-NLS-1$\r\n return self._isInRelList(rel, ZAtomRelTypes.ATOM_FEED_LINK_REL_LIST)", "def is_valid_tag(self, tag):\n\n if tag.has_attr('href') and len(tag['href']) > 0:\n href = tag['href']\n complete_href = self.session.complete_url(href)\n\n is_relative = self.url in complete_href\n is_visited = complete_href in self.visited_paths\n is_style_sheet = tag.name == \"link\"\n is_jumpTo = \"#\" in href\n is_mailTo = \"mailto\" in href\n is_js = \"javascript:\" in href\n return is_relative and \\\n not (is_visited or is_style_sheet or is_jumpTo or is_mailTo or is_js)\n else:\n return False", "def isLinkName(word):\r\n return wikiLink.match(word)", "def check_link(feed):\n # see if this is youtube link\n if feed['link'].count('youtube.com') and 'embed' in feed and feed['embed']:\n y = re.findall('youtube\\.com/embed/(.+)', feed['embed'])\n if y:\n # format correct youtube link\n feed['link'] = 'http://youtu.be/{0}'.format(y[0])\n return True\n\n return False", "def test_linkify(self):\r\n self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.link_text), self.link_atag)", "def isHighLinkDensity(self, e):\n links = Parser.getElementsByTag(e, tag='a')\n if links is None or len(links) == 0:\n return False\n \n text = Parser.getText(e)\n words = text.split(' ')\n numberOfWords = float(len(words))\n sb = []\n for link in links:\n sb.append(Parser.getText(link))\n \n linkText = ''.join(sb)\n linkWords = linkText.split(' ')\n numberOfLinkWords = float(len(linkWords))\n numberOfLinks = float(len(links))\n linkDivisor = float(numberOfLinkWords / numberOfWords)\n score = float(linkDivisor * numberOfLinks)\n if score >= 1.0:\n return True\n return False\n # return True if score > 1.0 else False", "def isAlternateLink(self, rel, type = None): #$NON-NLS-1$\r\n return u\"alternate\" == rel.strip().lower() #$NON-NLS-1$\r", "def assert_has_valid_link(self, response, expected_ending):\r\n assert link in response['link']\r\n self.assert_valid_url(link, expected_ending)" ]
[ "0.69997984", "0.6757967", "0.6695195", "0.66264176", "0.66264176", "0.66264176", "0.63451207", "0.6296661", "0.6101461", "0.60751194", "0.604954", "0.6045755", "0.60173523", "0.5861832", "0.5820362", "0.5799617", "0.5760772", "0.57050264", "0.5704481", "0.5701879", "0.5698439", "0.56340456", "0.56298524", "0.5579598", "0.55548924", "0.5543574", "0.5533561", "0.55235624", "0.55162615", "0.5506118" ]
0.8036169
0
Checks whether a paragraph element is part of a footnote.
def is_footnote(self, par): if par.find_next_sibling('p') is None: return False return self.is_footnote_text(par) or self.is_footnote_link(par)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_footnote_text(self, par):\n return (par is not None) and (\"foot\" in par.attrs.get(\"class\", []))", "def is_footnote_link(self, par):\n return self.is_footnote_text(par.find_next_sibling('p'))", "def is_footnote(self):\n return self.style['float'] == 'footnote'", "def check_marked_paragraph(paragraph, number):\n\n\tq = 0 # счетчик найденных маркеров\n\tchars = '<> ' # возможные символы в каретке\n\n\tfor i in range(len(paragraph.runs)):\n\t\tif \"<>\" in paragraph.runs[i].text: # если в тексте каретки встречается маркер\n\t\t\tfor c in paragraph.runs[i].text: # проверяем каждый символ в каретке\n\t\t\t\tif c not in chars: # если он не входит в список разрешенных символов\n\t\t\t\t\treturn False\n\t\t\tq += 1 # если проверка пройдена, увеличиваем счетчик\n\t\telif \"<\" in paragraph.runs[i].text and \">\" in paragraph.runs[i+1].text: # если маркер разделен на две соседние каретки\n\t\t\tfor c in paragraph.runs[i].text: # проверяем каждую из кареток\n\t\t\t\tif c not in chars:\n\t\t\t\t\treturn False\n\t\t\tfor c in paragraph.runs[i+1].text:\n\t\t\t\tif c not in chars:\n\t\t\t\t\treturn False\n\t\t\tq += 1\n\n\tif q != number: # если количество маркеров не совпало с указанным в выводе\n\t\treturn False\n\telse:\n\t\treturn True", "def ends_paragraph(s: str) -> bool:\n return not s.strip()", "def test_two_footnotes(self):\n text = \"Footnote[^1]\\n\\n[^1]: Footnote text\"\n self.assertNotEqual(self.md(text), self.md(text))", "def should_be_compact_paragraph(self, node):\n\n if isinstance(node.parent, nodes.container):\n if 'non-paragraph' not in node.parent.attributes['classes']:\n return False\n\n # noinspection PyUnresolvedReferences\n return super().should_be_compact_paragraph(node)", "def check_paragraph(self, para, links_para):\n #Return False if no paragraphs found\n if para is None:\n return False\n\n links = para.find_all('a')\n #Return False if no links found\n if links is None:\n return False\n\n #Return True if one link is valid in the paragraph\n for link in links:\n if self.check_link(link, links_para):\n return True\n return False", "def check_paragraph(line):\n if len(line) > 3 and line[:3] == '⋅⋅⋅':\n return '<p>' + line[3:] + '</p>'\n else:\n return line", "def testParagraphs(self):\n\n textractor = Textractor(paragraphs=True)\n\n # Extract text as sentences\n paragraphs = textractor(Utils.PATH + \"/article.pdf\")\n\n # Check number of paragraphs is as expected\n self.assertEqual(len(paragraphs), 13)", "def paragraph_mentions(text: str, keyword: str) -> bool:\n soup = BeautifulSoup(text, \"html5lib\")\n paragraphs = [p.get_text() for p in soup('p')]\n\n return any(keyword.lower() in paragraph.lower()\n for paragraph in paragraphs)", "def _get_footnote_par(self, id):\n start = self._current_body_par\n if start is None:\n start = self.parsed\n link = start.find_next(id=id)\n if link is None:\n raise NoFootnoteError(f\"Could not find id {id}\")\n foot_par = link.parent.find_next_sibling('p')\n if not self.is_footnote_text(foot_par):\n raise NoFootnoteError(f\"Failed to find adjacent link paragraph for footnote {id}.\")\n return foot_par", "def is_plugin_note(self, note):\n return bool(self.regex.match(note))", "def is_valid_paragraphs(args, skip=False):\n if is_valid_file_and_directory(args) or skip:\n if args.paragraphs is not None:\n return True\n return False", "def _is_valid_pt(content_type: str) -> bool:\n content_type = content_type.strip()\n return content_type in SPECIFICATION_PRIMITIVE_TYPES", "def is_foot_vertically_inside(self, item_or_group):\n if 
isinstance(item_or_group, ItemGroup):\n return not self.is_bottom_edge_below(item_or_group) and self.is_bottom_edge_below_top_foot(item_or_group)\n else:\n raise TypeError(\"item_or_group must be instance of ItemGroup.\")", "def parse_footnote(self, footelem) -> FootNote:\n\n fn = FootNote()\n if footelem.text is None:\n fn.footnote = ''\n else:\n fn.footnote = footelem.text.strip()\n\n fn.footnoteid = footelem.attrib['{%s}label' % footelem.nsmap['xlink']]\n\n return fn", "def end_paragraph(self):\n raise NotImplementedError", "def fix_footnotes(case_el, warnings):\n case_pq = PyQuery(case_el)\n # fix footnotes\n # footnotes look like this (since <small> is already stripped)\n # <p>--------</p>\n # <p>Notes:</p>\n # <p>\n # <sup>\n # <a href=\"#fn1\" name=\"fr1\">1</a>\n # </sup> text text text </p>\n # notes label can look like `<strong><br/> --------</strong>` -- NE2d/990/990ne2d139_12.xml\n notes_el = case_pq('p:contains(\"Notes:\")').filter(lambda i, el: strip_tags(PyQuery(el).text()).strip() == 'Notes:')\n refs = {}\n notes_section = None\n if notes_el:\n notes_section = notes_el.closest('article, section')\n footnote_index = 0\n opinion_index = 1\n footnote_el = None\n\n # before and after footnote sections there is a paragraph of either 8 or 15 hyphens\n footnote_breaks = ['-' * 8, '-' * 15]\n\n # remove footnote break before footnote section\n # can have tags in the footnote break -- A3d/50/50a3d607_29.xml\n prev_notes_el = notes_el.prev()\n if strip_tags(prev_notes_el.text()).strip() not in footnote_breaks:\n warnings.append(\"Unexpected element before notes el.\")\n else:\n prev_notes_el.remove()\n\n # remove \"Notes:\"\n old_footnote_el = notes_el.next()\n notes_el.remove()\n\n # step through each footnote element\n while old_footnote_el:\n # sometimes <a> tag gets out of <p> tag -- SE2d/590/590SE2d53.xml\n # put it inside a new <p>\n if old_footnote_el[0].tag == 'a':\n old_footnote_el = wrap_with(old_footnote_el, PyQuery(etree.Element('p')))\n\n link_el = old_footnote_el('a').eq(0)\n if not link_el:\n # this could be the end of footnotes, in which case stop\n if strip_tags(old_footnote_el.text()).strip() in footnote_breaks:\n old_footnote_el.remove()\n break\n # or could be a second paragraph of the previous footnote, in which case append\n if footnote_el:\n footnote_el.append(old_footnote_el)\n old_footnote_el = footnote_el.next()\n continue\n else:\n # if there's a non-footnote before the first footnote, we don't know what's going on,\n # so quit processing\n warnings.append(\"Unexpected non-footnote element.\")\n break\n label = link_el.text()\n footnote_index += 1\n footnote_id = f'footnote_{opinion_index}_{footnote_index}'\n footnote_el = PyQuery(renderer.make_footnote_el(id=footnote_id, label=label))\n refs[link_el.attr('href').lstrip('#')] = [footnote_id, footnote_el]\n while link_el.parent()[0].tag == 'sup':\n link_el = link_el.parent()\n link_el.remove()\n\n # remove space at beginning of footnote left over from removing footnote number\n if old_footnote_el[0].text:\n old_footnote_el[0].text = old_footnote_el[0].text.lstrip()\n\n wrap_with(old_footnote_el, footnote_el)\n old_footnote_el = footnote_el.next()\n\n # fix footnote references (<small> is already stripped)\n # ...<sup><a href=\"#fr1\" name=\"fn1\">1</a></sup>... typical\n # ...<sup id=\"co_fnRef_B00012045229866_ID0E4F\">1</sup> BR/590/590 B.R. 577.xml\n # ...<a href=\"#1\" name=\"fn1\" id=\"fn1\">1</a>... 
NW2d/781/781NW2d5512010WIApp33_29.xml\n for section in case_pq('.head-matter, .opinion').items():\n for old_ref_pq in section('a, sup[id]').items():\n label = old_ref_pq.text()\n if old_ref_pq[0].tag == 'a':\n ref_name = old_ref_pq.attr('name')\n if not (ref_name and ref_name.startswith('fn')):\n continue\n else:\n ref_name = \"fn\" + label\n ref, footnote_el = refs.get(ref_name, ['orphan', None])\n if footnote_el:\n # move footnotes from end of document to correct section -- see NW2d/906/906 N.W.2d 436_Replace.xml\n if section != notes_section:\n section.append(footnote_el)\n else:\n warnings.append(f\"Unmatched ref {repr(str(old_ref_pq))}\")\n ref_el = etree.Element('a', {'class': 'footnotemark', 'href': '#' + ref, 'id': 'ref_' + ref})\n ref_el.text = label\n while old_ref_pq.parent()[0].tag == 'sup':\n old_ref_pq = old_ref_pq.parent()\n PyQuery(ref_el).insert_before(old_ref_pq)\n old_ref_pq.remove()", "def is_plugin_note(self, note):\n return False", "def has_text(self, page: fitz.Page) -> bool:\n return page.get_text(clip=page.trimbox).strip() != \"\"", "def test_p_tag_is_not_empty_element(self):\n soup = self.soup(\"<p />\")\n self.assertFalse(soup.p.is_empty_element)\n self.assertEqual(str(soup.p), \"<p></p>\")", "def footnotes(self, text):\n html = '<div class=\"footnotes\">\\n%s<ol>%s</ol>\\n</div>\\n'\n return html % (self.hrule(), text)", "def footnote(self) -> str:\n return self._footnote", "def findFootnotesPlaceholder(self, root):\n def finder(element):\n for child in element:\n if child.text:\n if child.text.find(self.getConfig(\"PLACE_MARKER\")) > -1:\n return child, element, True\n if child.tail:\n if child.tail.find(self.getConfig(\"PLACE_MARKER\")) > -1:\n return child, element, False\n finder(child)\n return None\n \n res = finder(root)\n return res", "def footnote(self, footnote: str):\n\n self._footnote = footnote", "def contains_pronoun(cluster):\n for mention in cluster:\n if any([w.tag_.startswith(\"PRP\") for w in mention]):\n # Found a mention with a pronoun\n return True\n return False", "def is_postal_code(elem):\n return 'post' in elem.attrib['k']", "def is_break_tag(self, el):\n\n should_break = False\n if self.type == 'odp':\n if el.name == 'page' and el.namespace and el.namespace == self.namespaces['draw']:\n should_break = True\n return should_break", "def is_punct(self, word, language):" ]
[ "0.8045934", "0.7436927", "0.6865161", "0.60696536", "0.60482085", "0.6026402", "0.6006918", "0.57114613", "0.56973517", "0.56481415", "0.5624542", "0.55489916", "0.5463017", "0.53871185", "0.52857167", "0.5267883", "0.5256443", "0.5246128", "0.5203835", "0.5170078", "0.51535326", "0.51524526", "0.5114998", "0.5101653", "0.50892323", "0.5081919", "0.5063406", "0.50544477", "0.5050697", "0.502158" ]
0.79070187
1
Walk over paragraphs in the main text. If a footnote link is found, jump to that paragraph, then back to the main text.
def linked_text_paragraphs(self): for par in self._main_paragraphs_raw(): par_links = par.find_all('a') if len(par_links) == 0: self.main_count += len(par.text) yield par.text else: for el in par.contents: if el.name is None: #this is plain text self.main_count += len(str(el)) yield str(el) elif el.name == "a" and "href" in el.attrs: id = el["href"].lstrip('#') try: foot_par = self._get_footnote_par(id) except NoFootnoteError: self.log(f"Could not find footnote for {id}, skipping.") self.footnote_count += len(foot_par.text) yield foot_par.text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def segment_paragraphs(root_el, cites=[]):\n from capdb.models import Citation\n\n last_el_ends_mid_sentence = False\n join_with_last_el = False\n html_to_prepend_to_next_el = ''\n\n # build a lookup like {\"935 F.3d\": 1, \"123 Mass.\": 2}\n reporter_indexes = {}\n for i, cite in enumerate(Citation.sorted_by_type(cites)):\n eyecite_cite = next(extract_citations_from_text(cite.cite), None)\n if eyecite_cite:\n volume = eyecite_cite.groups['volume']\n reporter = eyecite_cite.groups['reporter']\n reporter_indexes[f\"{volume} {reporter}\"] = i+1\n\n # special case -- \"[134 Hawai'i 89]\" is a page number for \"134 Haw. 86\"\n if reporter == 'Haw.':\n reporter_indexes[f\"{volume} Hawai'i\"] = i + 1\n\n # process each paragraph\n for el_pq in PyQuery(root_el)('root').children().items():\n el = el_pq[0]\n if el.tag == 'header-end':\n continue\n\n html = inner_html(el)\n page_label = None\n exact_match = False\n index = 1\n\n # clean el whitespace\n clean_html = re.sub(r'\\s+|^<br>|<br>$', ' ', html).strip()\n if not clean_html:\n el_pq.remove()\n continue\n\n # strip tags to handle examples like\n # \"<p><strong>[16 N.Y.3d 274] <strong> <p/></strong></strong> <p> <strong> [945 N.E.2d 484]</strong> </p> <p> <strong>OPINION OF THE COURT</strong> </p></p>\"\n # in NE2d/945/945ne2d484.xml\n html_no_tags = strip_tags(clean_html).strip()\n\n # check for 'Page 123'\n m = re.match(r'Page (\\d+)$', html_no_tags)\n if m:\n page_label = make_page_label(m[1])\n exact_match = True\n\n # check for '[123 Mass. 456]'\n else:\n m = re.search(r\"\\[(?P<volume>\\d+) (?P<reporter>[A-Z][A-Za-z0-9 .']+) (?P<page>\\d+)\\]\", html_no_tags)\n if m:\n vol_reporter = f\"{m['volume']} {m['reporter']}\"\n if vol_reporter in reporter_indexes:\n index = reporter_indexes[vol_reporter]\n is_valid_reporter = True\n else:\n is_valid_reporter = False\n exact_match = m[0] == html_no_tags\n if exact_match or is_valid_reporter:\n page_label = make_page_label(m['page'], index)\n\n # handle page label found\n if page_label:\n clean_html = clean_html.replace(escape(m[0]), page_label)\n\n if exact_match:\n if last_el_ends_mid_sentence:\n join_with_last_el = True\n html_to_prepend_to_next_el += clean_html\n el_pq.remove()\n continue\n\n if html_to_prepend_to_next_el:\n clean_html = html_to_prepend_to_next_el + clean_html\n html_to_prepend_to_next_el = ''\n\n if join_with_last_el:\n join_with_last_el = False\n prev_el = el_pq.prev()\n if prev_el[0].tag == el_pq[0].tag:\n prev_el.append(('' if prev_el.text().endswith('-') else ' ')+clean_html)\n el_pq.remove()\n continue\n\n last_el_ends_mid_sentence = bool(mid_sentence_re.search(html_no_tags))\n\n if clean_html != html:\n el_pq.html(clean_html)", "def is_footnote_link(self, par):\n return self.is_footnote_text(par.find_next_sibling('p'))", "def test_forward_paragraph(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. 
StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"9.0\", \"9.0\"),\n after_sel=(\"15.0\", \"15.0\"),\n command_name=\"forward-paragraph\",\n )", "def add_paragraph_marks(text, keep_line_endings=True, maxlength=72):\n\n # add # after line that ends with full stop, question and exclamation marks:\n ptrn = r\"([.؟!] 
*[\\r\\n]+(?:PageV\\w{2}P\\d+[abAB]?[\\r\\n]+)?)([^\\r\\n#P\\Z])\"\n text = re.sub(ptrn, r\"\\1# \\2\", text)\n\n # add # after section titles (but not before page numbers and sub-titles)\n ptrn = r\"(### .+[\\r\\n]+(?:PageV\\w{2}P\\d+[\\r\\n]+)?)([^\\r\\n#P\\Z])\"\n text = re.sub(ptrn, r\"\\1# \\2\", text)\n\n if keep_line_endings:\n # add the tildas for continued lines:\n new_text = \"\"\n for line in re.split(r\"([\\r\\n]+)\", text):\n if not line.startswith((\"P\", \"#\", \"~~\")) \\\n and not re.match(r\"[\\r\\n]+\", line):\n line = \"~~\"+line\n new_text += line\n else:\n # move page number to the previous line:\n ptrn = r\"([^ \\r\\n.؟!]) *[\\r\\n]+(PageV[^P]+P[\\w]+) *[\\r\\n]+\"\n text = re.sub(ptrn, r\"\\1 \\2 \", text)\n # Add paragraph signs before every new line:\n ptrn = r\"([\\r\\n]+)([^\\r\\n#P\\s])\"\n text = re.sub(ptrn, r\"\\1# \\2\", text)\n # break long lines into shorter lines:\n new_text = wrap(text, maxlength)\n\n new_text = re.sub(\"~~#\", \"#\", new_text)\n new_text = re.sub(r\"~~([^\\n]+%~%)\", r\"# \\1\", new_text)\n new_text = re.sub(r\"~~\\.\\./\", \"../\", new_text)\n\n return new_text", "def _get_footnote_par(self, id):\n start = self._current_body_par\n if start is None:\n start = self.parsed\n link = start.find_next(id=id)\n if link is None:\n raise NoFootnoteError(f\"Could not find id {id}\")\n foot_par = link.parent.find_next_sibling('p')\n if not self.is_footnote_text(foot_par):\n raise NoFootnoteError(f\"Failed to find adjacent link paragraph for footnote {id}.\")\n return foot_par", "def test_back_paragraph(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. 
StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"9.0\", \"9.0\"),\n after_sel=(\"6.7\", \"6.7\"),\n command_name=\"back-paragraph\",\n )", "def _visit_paragraph(self,elem):\n # only add this p if we don't already have a descriptor for the site\n if self._curr_url not in self._url_paragraphs:\n try:\n paragraph_text = self._text_of_para(elem).strip()\n paragraph_text = strip_tags(paragraph_text)\n paragraph_text = (paragraph_text[:1001] + '...') if len(paragraph_text) > 1000 else paragraph_text\n self._url_paragraphs[self._curr_url] = paragraph_text\n print \"description of url:\" + repr(paragraph_text)\n except:\n print \"Failed to get paragraph text\"", "def textparse(self,\r\n analysetext,\r\n depth=0,\r\n keys=None,\r\n re_entering=False,\r\n newindex=Index(1)):\r\n if keys is None:\r\n keys = set()\r\n if LEFTNOTE not in analysetext \\\r\n or extract.embedded_extract(analysetext)[2] == 0:\r\n return\r\n #test if it contains embedded text\r\n\r\n## ee = extract.embedded_extract(RIGHTNOTE.join(LEFTNOTE.\r\n##join(analysetext.split(LEFTNOTE)[1:]).split(RIGHTNOTE)[:-1]),eliminate = True)\r\n\r\n ee_temp = extract.embedded_extract(analysetext)\r\n embeddedlist = ee_temp[0]\r\n\r\n if depth-1 in self.pass_key_dict:\r\n\r\n self.pass_key_dict[depth] = self.pass_key_dict[depth-1]\r\n else:\r\n self.pass_key_dict[depth] = [[list(keys)], []]\r\n\r\n emb_len = str(len(embeddedlist))\r\n\r\n for a_temp, phrase in enumerate(embeddedlist):\r\n if a_temp<10 or (a_temp>9 and a_temp<100\r\n and a_temp%10 == 0) or (a_temp>99\r\n and a_temp%100==0):\r\n #display counter for embedded notes\r\n print()\r\n print(str(a_temp)+'/'+emb_len)\r\n\r\n\r\n\r\n\r\n\r\n\r\n if extract.embedded_extract(phrase)[2] > 1:\r\n\r\n\r\n if phrase[0] == LEFTNOTE and phrase[-1] == RIGHTNOTE:\r\n newindex = self.textinterpret(\r\n extract.embedded_extract(\r\n RIGHTNOTE.join(LEFTNOTE.join(phrase.split(LEFTNOTE)[1:])\r\n .split(RIGHTNOTE)[:-1]),\r\n eliminate=True)[1],\r\n depth,\r\n re_entering=re_entering,\r\n newindex=newindex)\r\n else:\r\n newindex = self.textinterpret(\r\n extract.embedded_extract(\r\n phrase,\r\n eliminate=True)[1],\r\n depth,\r\n re_entering=re_entering,\r\n newindex=newindex)\r\n newindex = self.textparse(phrase[1:-1],\r\n depth+1,\r\n re_entering=re_entering,\r\n newindex=newindex)\r\n\r\n\r\n else:\r\n\r\n newindex = self.textinterpret(phrase,\r\n depth,\r\n re_entering=re_entering,\r\n newindex=newindex)\r\n print()\r\n return newindex", "def search_loop(self, pattern, parent, cell_name, paragraph):\n index = self.paragraphs.index(paragraph)\n self.paragraphs[index] = \"\"\n while True:\n index += 1\n try:\n para = self.paragraphs[index].rstrip()\n try:\n if re.match(pattern, para):\n self.datafields[f\"{parent} {self.active}\"][cell_name] = para\n self.paragraphs[index] = \"\"\n break\n if index is len(self.paragraphs):\n break\n except KeyError:\n pass\n except IndexError:\n break", "def para_parse(text, j, op_b, cl_b):\n\n depth = 0\n loc2 = j\n\n while 1:\n if text[loc2] == op_b:\n depth = depth + 1\n\n elif text[loc2] == cl_b:\n depth = depth - 1\n if depth == 0:\n break\n loc2 = loc2 + 1\n return loc2", 
"def fix_footnotes(case_el, warnings):\n case_pq = PyQuery(case_el)\n # fix footnotes\n # footnotes look like this (since <small> is already stripped)\n # <p>--------</p>\n # <p>Notes:</p>\n # <p>\n # <sup>\n # <a href=\"#fn1\" name=\"fr1\">1</a>\n # </sup> text text text </p>\n # notes label can look like `<strong><br/> --------</strong>` -- NE2d/990/990ne2d139_12.xml\n notes_el = case_pq('p:contains(\"Notes:\")').filter(lambda i, el: strip_tags(PyQuery(el).text()).strip() == 'Notes:')\n refs = {}\n notes_section = None\n if notes_el:\n notes_section = notes_el.closest('article, section')\n footnote_index = 0\n opinion_index = 1\n footnote_el = None\n\n # before and after footnote sections there is a paragraph of either 8 or 15 hyphens\n footnote_breaks = ['-' * 8, '-' * 15]\n\n # remove footnote break before footnote section\n # can have tags in the footnote break -- A3d/50/50a3d607_29.xml\n prev_notes_el = notes_el.prev()\n if strip_tags(prev_notes_el.text()).strip() not in footnote_breaks:\n warnings.append(\"Unexpected element before notes el.\")\n else:\n prev_notes_el.remove()\n\n # remove \"Notes:\"\n old_footnote_el = notes_el.next()\n notes_el.remove()\n\n # step through each footnote element\n while old_footnote_el:\n # sometimes <a> tag gets out of <p> tag -- SE2d/590/590SE2d53.xml\n # put it inside a new <p>\n if old_footnote_el[0].tag == 'a':\n old_footnote_el = wrap_with(old_footnote_el, PyQuery(etree.Element('p')))\n\n link_el = old_footnote_el('a').eq(0)\n if not link_el:\n # this could be the end of footnotes, in which case stop\n if strip_tags(old_footnote_el.text()).strip() in footnote_breaks:\n old_footnote_el.remove()\n break\n # or could be a second paragraph of the previous footnote, in which case append\n if footnote_el:\n footnote_el.append(old_footnote_el)\n old_footnote_el = footnote_el.next()\n continue\n else:\n # if there's a non-footnote before the first footnote, we don't know what's going on,\n # so quit processing\n warnings.append(\"Unexpected non-footnote element.\")\n break\n label = link_el.text()\n footnote_index += 1\n footnote_id = f'footnote_{opinion_index}_{footnote_index}'\n footnote_el = PyQuery(renderer.make_footnote_el(id=footnote_id, label=label))\n refs[link_el.attr('href').lstrip('#')] = [footnote_id, footnote_el]\n while link_el.parent()[0].tag == 'sup':\n link_el = link_el.parent()\n link_el.remove()\n\n # remove space at beginning of footnote left over from removing footnote number\n if old_footnote_el[0].text:\n old_footnote_el[0].text = old_footnote_el[0].text.lstrip()\n\n wrap_with(old_footnote_el, footnote_el)\n old_footnote_el = footnote_el.next()\n\n # fix footnote references (<small> is already stripped)\n # ...<sup><a href=\"#fr1\" name=\"fn1\">1</a></sup>... typical\n # ...<sup id=\"co_fnRef_B00012045229866_ID0E4F\">1</sup> BR/590/590 B.R. 577.xml\n # ...<a href=\"#1\" name=\"fn1\" id=\"fn1\">1</a>... 
NW2d/781/781NW2d5512010WIApp33_29.xml\n for section in case_pq('.head-matter, .opinion').items():\n for old_ref_pq in section('a, sup[id]').items():\n label = old_ref_pq.text()\n if old_ref_pq[0].tag == 'a':\n ref_name = old_ref_pq.attr('name')\n if not (ref_name and ref_name.startswith('fn')):\n continue\n else:\n ref_name = \"fn\" + label\n ref, footnote_el = refs.get(ref_name, ['orphan', None])\n if footnote_el:\n # move footnotes from end of document to correct section -- see NW2d/906/906 N.W.2d 436_Replace.xml\n if section != notes_section:\n section.append(footnote_el)\n else:\n warnings.append(f\"Unmatched ref {repr(str(old_ref_pq))}\")\n ref_el = etree.Element('a', {'class': 'footnotemark', 'href': '#' + ref, 'id': 'ref_' + ref})\n ref_el.text = label\n while old_ref_pq.parent()[0].tag == 'sup':\n old_ref_pq = old_ref_pq.parent()\n PyQuery(ref_el).insert_before(old_ref_pq)\n old_ref_pq.remove()", "def is_footnote(self, par):\n if par.find_next_sibling('p') is None:\n return False\n return self.is_footnote_text(par) or self.is_footnote_link(par)", "def test_forward_paragraph_extend_selection(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. 
Does StormReady\n make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"10.0\", \"10.0\"),\n after_sel=(\"10.0\", \"15.0\"),\n command_name=\"forward-paragraph-extend-selection\",\n )", "def _has_page_jump(text):\n # Determines matches with format strings.\n for format_tuple in _FORMAT_STRINGS:\n jump = _get_jump_with_pattern(text, format_tuple)\n if jump:\n return jump\n\n # Recognizes common OCR for \"From page 1\".\n match = _match_pattern(text, r\"(^Frompagel$){e<=3}\")\n if match and text[-1] == 'l':\n return -1", "def is_footnote_text(self, par):\n return (par is not None) and (\"foot\" in par.attrs.get(\"class\", []))", "def test_extend_to_paragraph(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. 
Does StormReady\n make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"9.0\", \"9.0\"),\n after_sel=(\"8.0\", \"13.33\"),\n command_name=\"extend-to-paragraph\",\n )", "def links_to_text(self):\r\n self.parser.stripTags(self.get_top_node(), 'a')", "def test_two_footnotes(self):\n text = \"Footnote[^1]\\n\\n[^1]: Footnote text\"\n self.assertNotEqual(self.md(text), self.md(text))", "def extract_paragraph(file_name, url_text = None, show_property = False, database = None, extract_all_property=False, \n return_documenTM = False, cut_off = True, unit_dict = None, special_unit_dictionary = None):\n if not url_text:\n url_text = file_name\n \n if not database: \n database = {}\n \n if not isinstance(unit_dict, dict):\n unit_dict = unit_dict_default\n \n keyword_dict = make_keyword_dict(unit_dict)\n \n Q = DocumentTM(file_name, **database)\n Q.doc()\n Q.find_strange()\n chemical_type_dict = {}\n database = Q.database()\n \n if special_unit_dictionary:\n Q.set_special_unit(special_unit_dictionary)\n \n \n data_collection = []\n json_list = []\n \n for Para in Q.Para:\n new_split, unit = Q.tokenize_paragraph(Para, lemma = False, Strange = True, cut_off=cut_off)\n \n if not new_split:\n continue\n \n #print (new_split)\n \n before_represent_chem = False\n \n for sent in cut_paragraph(new_split):\n new_sent, unit_dictionary, next_represent_chem = matching_algorithm(sent, database, chemical_type_dict, before_represent_chem)\n\n if extract_all_property:\n #iters = chain.from_iterable(unit_dictionary.values())\n iters = chain.from_iterable([dics.values() for dics in unit_dictionary.values()])\n else:\n iters = unit_dictionary['Character'].values()\n \n \n #print (unit_dictionary['Character'])\n #if unit_dictionary['Character'] or unit_dictionary['Reaction']:\n #data_collection.append([sent, unit_dictionary])\n \n if show_property and (unit_dictionary['Character'] or unit_dictionary['Reaction']):\n \n print (\"\\n\\n------------------------------------\")\n print (file_name)\n print (\" \".join([str(t) for t in new_sent]))\n print (\"\\n\")\n #print (Para)\n #print (\" \".join(new_split))\n print (\"------------------------------------\")\n \n for T in chain.from_iterable(iters):\n #for T in t:\n dictionary_chemical = {'Material':T.target, 'Value':T.value, 'Unit':T.unit, 'Condition':T.condition, 'Property':T.prop,\n 'Reference':str(file_name)}\n \n json_list.append(dictionary_chemical)\n\n if show_property:\n print (\"value:\", T, \"condition:\", T.condition, \"chemical:\", T.target)\n \n if isinstance(next_represent_chem, Chemical) or not next_represent_chem:\n before_represent_chem = next_represent_chem \n \n if return_documenTM:\n return json_list, Q\n \n return json_list", "def parse_paragraphs(self):\n paragraphs = self.paragraphs\n for paragraph in paragraphs:\n try:\n if paragraph == \"Oznaczenie sądu\" and not self.locked_cells[\"Oznaczenie sądu\"]:\n self.search_index(4, \"Oznaczenie sądu\", paragraph)\n\n if paragraph.startswith(\"3.Firma,\") and not self.locked_cells[\"Firma, pod którą spółka działa\"]:\n self.search_index(2, \"Firma, pod którą spółka działa\", paragraph)\n\n if paragraph.startswith(\"3.Nazwa\") and not self.locked_cells[\"Firma, pod którą spółka działa\"]:\n self.search_index(2, \"Firma, pod którą spółka działa\", paragraph)\n\n if paragraph.startswith(\"1.Siedziba\") and not self.locked_cells[\"Siedziba\"]:\n self.search_index(4, \"Siedziba\", paragraph)\n\n if paragraph.startswith(\"2.Adres\") and not 
self.locked_cells[\"Adres\"]:\n self.search_index(4, \"Adres\", paragraph)\n\n if paragraph.startswith(\"Numer KRS\") and not self.locked_cells[\"KRS\"]:\n self.datafields[\"KRS\"] = paragraph.split()[-1]\n self.locked_cells[\"KRS\"] = True\n\n if paragraph.startswith(\"2.Numer REGON/NIP\") and not self.locked_cells[\"REGON/NIP\"]:\n self.search_index(2, \"REGON/NIP\", paragraph)\n\n if paragraph.startswith(\"1.Oznaczenie formy prawnej\") and not self.locked_cells[\"Forma Prawna\"]:\n self.search_index(2, \"Forma Prawna\", paragraph)\n\n if paragraph.startswith(\"1.Wysokość kapitału zakładowego\"):\n self.search_index(2, \"Kapitał Zakładowy\", paragraph)\n\n if paragraph.startswith(\"5.Kwotowe określenie części kapitału wpłaconego\"):\n self.search_index(2, \"Kapitał Wpłacony\", paragraph)\n\n if paragraph.startswith(\"Rubryka 7 - Dane wspólników\"): # Open \"Wspólnicy\" parsing block.\n self.locked_cells[\"Wspólnicy\"] = True\n\n if paragraph.startswith(\"Rubryka 7 - Komitet założycielski\"): # STOWARZYSZENIE\n break\n\n if paragraph.startswith(\"1.Nazwisko / Nazwa lub firma\") and self.locked_cells[\"Wspólnicy\"]:\n self.active += 1\n self.datafields[f\"Wspólnik {self.active}\"] = {}\n\n pattern = rf\"^[A-Z{self.unicode}]+\"\n self.search_loop(pattern, \"Wspólnik\", \"Nazwisko/Nazwa\", paragraph)\n\n if paragraph.startswith(\"2.Imiona\") and self.locked_cells[\"Wspólnicy\"]:\n pattern = rf\"[A-Z{self.unicode}]+\\s[A-Z{self.unicode}]+$|^[A-Z{self.unicode}]+$|^[*]+$\"\n self.search_loop(pattern, \"Wspólnik\", \"Imiona\", paragraph)\n\n if paragraph.startswith(\"3.Numer PESEL/REGON\") and self.locked_cells[\"Wspólnicy\"]:\n pattern = r\"[-]+|[0-9]{9,11}\"\n self.search_loop(pattern, \"Wspólnik\", \"PESEL/REGON\", paragraph)\n\n if paragraph.startswith(\"4.Numer KRS\") and self.locked_cells[\"Wspólnicy\"]:\n pattern = r\"[-]+|[*]+|[0-9]{10}$\"\n self.search_loop(pattern, \"Wspólnik\", \"KRS\", paragraph)\n\n if paragraph.startswith(\"5.Posiadane przez wspólnika udziały\"):\n index = paragraphs.index(paragraph)\n line_1 = paragraphs[index + 2].strip(\" \")\n line_2 = paragraphs[index + 3].strip(\" \")\n if line_2:\n self.datafields[f\"Wspólnik {self.active}\"][\"Udziały\"] = f\"{line_1} {line_2}\"\n else:\n self.datafields[f\"Wspólnik {self.active}\"][\"Udziały\"] = f\"{line_1}\"\n\n if paragraph == \"ZARZĄD\":\n self.locked_cells[\"Wspólnicy\"] = False # Close \"Wspólnicy\" parsing block.\n self.locked_cells[\"Zarząd\"] = True # Open \"Zarząd\" parsing block.\n self.active = 0\n\n if paragraph.startswith(\"1.Nazwisko\") and self.locked_cells[\"Zarząd\"]:\n self.active += 1\n self.datafields[f\"Zarząd {self.active}\"] = {}\n pattern = rf\"^[A-Z{self.unicode}]+\"\n self.search_loop(pattern, \"Zarząd\", \"Nazwisko/Nazwa\", paragraph)\n\n if paragraph.startswith(\"2.Imiona\") and self.locked_cells[\"Zarząd\"]:\n pattern = rf\"^[A-Z{self.unicode}]+\\s[A-Z{self.unicode}]+$|^[A-Z{self.unicode}]+$|^[*]+$\"\n self.search_loop(pattern, \"Zarząd\", \"Imiona\", paragraph)\n\n if paragraph.startswith(\"5.Funkcja w organie \") and self.locked_cells[\"Zarząd\"]:\n paragraph = paragraph.strip(\"5.Funkcja w organie reprezentującym \")\n self.datafields[f\"Zarząd {self.active}\"][\"Funkcja\"] = paragraph\n\n if paragraph.startswith(\"Rubryka 2 - Organ nadzoru\"):\n self.locked_cells[\"Zarząd\"] = False # Close \"Zarząd\" parsing block.\n except KeyError:\n pass\n return self.datafields", "def testParagraphs(self):\n\n textractor = Textractor(paragraphs=True)\n\n # Extract text as sentences\n paragraphs = 
textractor(Utils.PATH + \"/article.pdf\")\n\n # Check number of paragraphs is as expected\n self.assertEqual(len(paragraphs), 13)", "def treat_page(self) -> None:\n text = self.current_page.text\n\n if self.opt.up:\n text = self.opt.text + '\\n' + text\n elif not self.opt.reorder:\n text += '\\n' + self.opt.text\n else:\n text = textlib.add_text(text, self.opt.text,\n site=self.current_page.site)\n\n self.put_current(text, summary=self.opt.summary, minor=self.opt.minor)", "def test_backward_kill_paragraph(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. 
Does StormReady\n make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"9.0\", \"9.0\"),\n after_sel=(\"7.0\", \"7.0\"),\n command_name=\"backward-kill-paragraph\",\n )", "def extract_paragraph_test(file_name, url_text = None, show_property = False, database = None, extract_all_property=False, \n return_documenTM = False, cut_off = True, unit_dict = None):\n if not url_text:\n url_text = file_name\n \n if not database: \n database = {}\n \n if not isinstance(unit_dict, dict):\n unit_dict = unit_dict_default\n \n keyword_dict = make_keyword_dict(unit_dict)\n \n Q = DocumentTM(file_name, **database)\n Q.doc(parser = 'cde_parser')\n Q.find_strange()\n chemical_type_dict = {}\n database = Q.database()\n \n data_collection = []\n json_list = []\n \n for Para in Q.Para:\n new_split, unit = Q.tokenize_test(Para, lemma = False, Strange = True, cut_off=cut_off)\n \n if not new_split:\n continue\n \n #print (new_split)\n \n before_represent_chem = False\n \n for sent in cut_paragraph(new_split):\n new_sent, unit_dictionary, next_represent_chem = matching_algorithm(sent, database, chemical_type_dict, before_represent_chem)\n\n if extract_all_property:\n #iters = chain.from_iterable(unit_dictionary.values())\n iters = chain.from_iterable([dics.values() for dics in unit_dictionary.values()])\n else:\n iters = unit_dictionary['Character'].values()\n \n \n #print (unit_dictionary['Character'])\n #if unit_dictionary['Character'] or unit_dictionary['Reaction']:\n #data_collection.append([sent, unit_dictionary])\n \n if show_property and (unit_dictionary['Character'] or unit_dictionary['Reaction']):\n \n print (\"\\n\\n------------------------------------\")\n print (file_name)\n print (\" \".join([str(t) for t in new_sent]))\n print (\"\\n\")\n #print (Para)\n #print (\" \".join(new_split))\n print (\"------------------------------------\")\n \n for T in chain.from_iterable(iters):\n #for T in t:\n dictionary_chemical = {'Material':T.target, 'Value':T.value, 'Unit':T.unit, 'Condition':T.condition, 'Property':T.prop,\n 'Reference':str(file_name)}\n \n json_list.append(dictionary_chemical)\n\n if show_property:\n print (\"value:\", T, \"condition:\", T.condition, \"chemical:\", T.target)\n \n if isinstance(next_represent_chem, Chemical) or not next_represent_chem:\n before_represent_chem = next_represent_chem \n \n if return_documenTM:\n return json_list, Q\n \n return json_list", "def home(self):\n while self.document.characters[self.position-1].character != '\\n':\n self.position -= 1\n if self.position == 0:\n # Got to beginning of file before newline\n break", "def test_back_paragraph_extend_selection(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. 
StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"9.0\", \"9.5\"),\n after_sel=(\"6.7\", \"9.5\"),\n command_name=\"back-paragraph-extend-selection\",\n )", "def test_fill_paragraph(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year, Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000 tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly weather impacts every American. Communities can now rely on the National Weather Services StormReady program to help them guard against the ravages of Mother Nature.\n\n Some 90% of all presidentially\n declared disasters are weather related,\n leading to around 500 deaths per year\n and nearly $14 billion in damage.\n StormReady, a program\n started in 1999 in Tulsa, OK,\n helps arm America's\n communities with the communication and\n safety skills needed to save lives and\n property--before and during the event.\n StormReady helps community leaders and\n emergency managers strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year, Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000 tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly weather impacts every American. Communities can now rely on the National Weather Services StormReady program to help them guard against the ravages of Mother Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. 
StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property--before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.0\", \"3.7\"),\n after_sel=(\"10.0\", \" 10.0\"),\n command_name=\"fill-paragraph\",\n directives=\"@pagewidth 80\",\n )", "def label_paragraphs(root_el, fastcase_data):\n # case metadata\n citations = [alphanum_lower(\" \".join((c[\"Volume\"], c[\"Reporter\"], c[\"Page\"]) + ((c[\"Suffix\"],) if \"Suffix\" in c else ()))) for c in fastcase_data['Citations']]\n name_clean = alphanum_lower(fastcase_data['PartyHeader']) if fastcase_data['PartyHeader'] else None\n court_clean = alphanum_lower(fastcase_data['CourtName'] or fastcase_data['CourtAbbreviation'])\n docket_numbers_clean = [alphanum_lower(d) for d in fastcase_data['DocketNumbers']]\n\n # via https://github.com/harvard-lil/CaselawAccessProjectSchemas/blob/master/casebodyxml/v1/casebodyxml.xsd\n states = {k:i for i, k in enumerate([None, \"citation\", \"parties\", \"docketnumber\", \"court\", \"otherdate\", \"decisiondate\", \"history\", \"syllabus\", \"attorneys\", \"judges\", \"disposition\", \"_opinionstart\", \"_preauthor\", \"author\", \"opinion\"])}\n reverse_states = {v:k for k, v in states.items()}\n\n state = 0\n header_els = []\n opinions = [[]]\n header_complete = False\n extra_els = []\n blank_els = []\n authors = []\n opinion_starts = []\n paragraph_id = 1\n\n def shift_to_opinion(i):\n \"\"\"Move i elements from the end of header to the start of opinion.\"\"\"\n if not i:\n return\n nonlocal header_els\n opinions[0][0:0] = header_els[-i:]\n header_els = header_els[:-i]\n\n def add_el(el, state, target_list=header_els):\n nonlocal blank_els, paragraph_id\n if state:\n if not reverse_states[state].startswith('_'):\n el.attrib['class'] = reverse_states[state]\n if state == states['_opinionstart']:\n opinion_starts.append((len(target_list), el))\n elif state == states['author']:\n authors.append((len(target_list), el))\n blank_els = []\n else:\n blank_els.append(el)\n el.attrib['id'] = f'p-{paragraph_id}'\n paragraph_id += 1\n target_list.append(el)\n\n def append_to_previous(line):\n PyQuery(header_els[-1]).append(PyQuery(line))\n\n for el_pq in PyQuery(root_el)('root').children().items():\n\n if extra_els:\n extra_els.append(el_pq)\n el_pq = extra_els.pop(0)\n\n el = el_pq[0]\n\n # mark the end of the labeled front matter (which may or may not align with actual end)\n if el.tag == 'header-end':\n header_complete = True\n if state == states[\"author\"]:\n state = states[\"opinion\"]\n continue\n\n # skip\n if el.text == \"COPYRIGHT MATERIAL OMITTED\":\n continue\n\n # add linebreak after element for indentation\n if not (el.tail and el.tail.startswith('\\n')):\n el.tail = '\\n' + (el.tail or '')\n\n line = inner_html(el)\n line_text = strip_tags(line)\n line_text_lower = line_text.lower()\n line_alphanum_chars = alphanum_lower(line_text)\n\n # if we've had 5 regular paragraphs in a row, assume we missed the start of the opinion\n if state < states[\"opinion\"] and len(blank_els) >= 5:\n 
shift_to_opinion(len(blank_els))\n state = states[\"opinion\"]\n\n # we have now reached the opinion and no longer have to process header lines\n if state >= states[\"opinion\"]:\n # check short lines for the start of a concurrence or dissent\n m = new_opinion_re.match(line_text)\n if m:\n el.attrib['class'] = 'author'\n el.attrib['opinion-type'] = opinion_type_lookup[m[1].lower()]\n opinions.append([])\n\n add_el(el, 0, opinions[-1])\n continue\n\n # citation\n if state <= states[\"citation\"]:\n if any(c in line_alphanum_chars for c in citations) or all(citation_like_re.match(s) for s in line.split('<br>')):\n state = states[\"citation\"]\n continue # don't include citation lines in output\n\n # parties\n if state < states[\"parties\"]:\n # special case -- if the case doesn't have a name, like NE2d/939/939ne2d586.xml,\n # assume that whatever comes after the last citation is the name\n if name_clean is None or line_alphanum_chars == name_clean:\n state = states[\"parties\"]\n add_el(el, state)\n elif header_els and name_clean == alphanum_lower(inner_html(header_els[-1]) + line):\n # handle edge case where name is split across two paragraphs\n append_to_previous(line)\n elif line_alphanum_chars.startswith(name_clean) or similar_strings(line_text, fastcase_data['PartyHeader']):\n # special cases -- NW2d/881/881 N.W.2d 813-4_Replace.xml, NW2d/792/792NW2d203.xml\n state = states[\"parties\"]\n add_el(el, state)\n else:\n # if we haven't found a valid name yet, paragraphs are just regular paragraphs\n add_el(el, 0)\n continue\n\n # docket numbers or court\n if state < states[\"court\"]:\n # detect 'Supreme Judicial Court of Massachusetts.' and 'United States Bankruptcy Appellate Panel of the Ninth Circuit.' as a court, but not\n # 'Court of Appeals Case No. 04A03-1707-IF-1724' or 'Consol. Court No. 16-00054'\n # line may be 'Court of Appeals of Virginia, Chesapeake.' if court is 'Court of Appeals of Virginia'\n # line may be 'North Carolina Court of Appeals.' if court is 'Court of Appeals of North Carolina'\n # if 'court' in line.lower() or 'panel' in line.lower()) and ('No.' not in line or 'Division No.' 
in line):\n if any(line_alphanum_chars.startswith(s) for s in docket_numbers_clean):\n state = states[\"docketnumber\"]\n elif line_alphanum_chars.startswith(court_clean) or (\n (line_text.endswith('Court of Appeals.') or any(line_text_lower.startswith(s) for s in ('court of appeal', 'supreme court')))\n ):\n state = states[\"court\"]\n else:\n state = states[\"docketnumber\"]\n add_el(el, state)\n continue\n\n # accidental start of opinion included in head matter\n # NW2d/737/737NW2d768_3New.xml -- \"On order of the Court ...\"\n if state >= states[\"decisiondate\"]:\n if line_text.startswith(\"On order of the Court\"):\n state = states[\"opinion\"]\n add_el(el, 0, opinions[-1])\n continue\n\n # dates\n # 'DATED at Olympia, Washington, this 31st day of October, 2018.'\n # '01-04-2017'\n if state <= states[\"decisiondate\"]:\n # long line isn't decision date -- SCt/134/134sct985_2.xml\n if len(line_text) < 80 and (date_re.search(line_text) or line_text_lower.startswith('dated at') or re.match(r'\\d{1,2}-\\d{2}-\\d{4}$', line_text)):\n if any(line_text.startswith(s) for s in ('Released', 'Submitted', 'Dissenting')) and 'Decided' not in line_text:\n # handle case like\n # 'Submitted June 5, 2007, at Lansing.'\n # 'Decided June 12, 2007, at 9:05 a.m.'\n # 'Released for Publication October 11, 2007\n # 'Dissenting Opinion of Chief Justice Maynard June 27, 2008.'\n # avoid\n # 'Submitted March 2, 2010.<br>Decided April 2, 2010.'\n state = states[\"otherdate\"]\n else:\n state = states[\"decisiondate\"]\n add_el(el, state)\n continue\n\n if state < states[\"judges\"]:\n # strip off judges lines appended to current line, and add as an extra_el\n # \"for Respondent.<strong>Justice BEATTY.</strong></p>\" SE2d/708/708se2d750.xml\n # \"... West Virginia Insurance Federation.<strong>DAVIS, Justice:</strong></p>\" SE2d/719/719se2d830.xml\n # \"for appellees.<strong>Present: HUMPHREYS, McCLANAHAN and BEALES, JJ.</strong><strong>BEALES, Judge.</strong>\" SE2d/708/708se2d429.xml\n while True:\n m = re.search('(.+)(<strong>([^<]+)</strong>)$', line)\n if m and is_judges_or_author(m[3]):\n extra_els.insert(0, PyQuery('<p>'+m[2]+'</p>'))\n line = m[1]\n el_pq.html(line)\n line_text = strip_tags(line)\n line_alphanum_chars = alphanum_lower(line_text)\n continue\n break\n\n # history\n # 'Appeal by defendant from judgment entered 8 December 2004 by Judge Robert H. Hobgood in Alamance County Superior Court. Heard in the Court of Appeals 2 November 2005.'\n if line_text_lower.startswith('appeal') or any(s in line_text for s in ('Superior Court', 'District Court', 'Circuit Court')):\n state = states[\"history\"]\n add_el(el, state)\n continue\n\n # syllabus\n if 'Syllabus by the Court' in line_text or (state == states[\"syllabus\"] and re.match(r'\\d+\\.|[a-z\\[]', line_text)):\n if re.match(r'[a-z\\[]', line_text):\n # handle case where syllabus is split midsentence\n append_to_previous(line)\n else:\n state = states[\"syllabus\"]\n add_el(el, state)\n continue\n\n # attorneys\n # 'Garrett D. Blanchfield, Jr., Reinhardt Wendorf & Blanchfield, St. 
Paul, MN, for Appellants.'\n if any(line_text.startswith(s) for s in (\"An amicus\", \"For the\", \"On behalf of\")) or any(s in line_text for s in (' for ', 'amici curiae', 'pro se')):\n state = states[\"attorneys\"]\n add_el(el, state)\n continue\n\n # titles that mark the start of an opinion, like \"OPINION\"\n if line_alphanum_chars in opinion_start_lines or any(line_alphanum_chars.startswith(s) for s in opinion_start_line_prefixes):\n state = states[\"_opinionstart\"]\n if line_text != \"OPINION\":\n add_el(el, state)\n continue\n\n # Handle paragraph that is definitely followed by author, like \"The opinion of the court was delivered by\", A3d/148/148 A.3d 441_Replace.xml\n if line_text == \"The opinion of the court was delivered by\":\n state = states[\"_preauthor\"]\n add_el(el, 0)\n continue\n if state == states[\"_preauthor\"]:\n add_el(el, states[\"author\"])\n state = states[\"opinion\"]\n continue\n\n # author\n # note, in theory fastcase_data[\"Author\"] could be useful for identifying author paragraph, but it's often not set,\n # and when it is it can also appear in the judges line and other places ...\n judges_or_author = is_judges_or_author(line_text)\n if judges_or_author == \"judges\":\n state = states[\"judges\"]\n add_el(el, state)\n continue\n elif judges_or_author == \"author\":\n add_el(el, states[\"author\"])\n state = states[\"opinion\"] if header_complete else states[\"author\"]\n continue\n\n # weird special case where there's an order provided before the start of the opinion\n # E.g. NW2d/740/740NW2d659_1.xml, 'ORDER ENTERED JUNE 8, 2007' and subsequent unlabeled lines\n if line_text.startswith(\"ORDER ENTERED\") or state == states[\"disposition\"]:\n state = states[\"disposition\"]\n add_el(el, state)\n continue\n\n # regular paragraph\n add_el(el, 0)\n continue\n\n # fixups\n labels = [el.attrib.get('class') for el in header_els]\n # rewrite special case like NE2d/944/944ne2d1119.xml:\n # [['parties', '...'],\n # ['docketnumber', 'Feb. 15'],\n # ['docketnumber', '2011.'],\n # ['court', 'Court of Appeals of New York.']]\n # to\n # [['parties', '...'],\n # ['court', 'Court of Appeals of New York.'],\n # ['decisiondate', 'Feb. 15, 2011.']]\n if labels == [None, 'docketnumber', 'docketnumber', 'court']:\n docket_combined = header_els[1].text + \", \" + header_els[2].text\n if date_re.match(docket_combined):\n header_els[1].attrib['class'] = 'decisiondate'\n header_els[1].text = docket_combined\n header_els = [header_els[0], header_els[3], header_els[1]]\n\n # change all author labels but the last to judges; we likely misdetected one earlier\n for i, el in authors[:-1]:\n el.attrib['class'] = \"judges\"\n\n # if we didn't find an author and the last line is unlabeled, assume that's the author with a typo --\n # e.g. 
NW2d/753/753NW2d552_1.xml , missing comma\n if header_els and not authors and not opinion_starts and state >= states[\"judges\"] and header_els[-1].attrib.get('class') is None:\n header_els[-1].attrib['class'] = \"author\"\n authors = [(len(header_els)-1, header_els[-1])]\n\n # move author, and any paragraphs after it, to beginning of first opinion\n move_index = opinion_starts[0][0] + 1 if opinion_starts else authors[-1][0] if authors else None\n if move_index is not None:\n shift_to_opinion(len(header_els)-move_index)\n\n return header_els, opinions", "def block(self, text, head_offset=0):\n if not self.lite:\n tre = '|'.join(self.btag)\n else:\n tre = '|'.join(self.btag_lite)\n text = text.split('\\n\\n')\n\n tag = 'p'\n atts = cite = graf = ext = ''\n c1 = ''\n\n out = []\n\n anon = False\n for line in text:\n pattern = r'^(%s)(%s%s)\\.(\\.?)(?::(\\S+))? (.*)$' % (\n tre, self.align_re, self.c\n )\n match = re.search(pattern, line, re.S)\n if match:\n if ext:\n out.append(out.pop() + c1)\n\n tag, atts, ext, cite, graf = match.groups()\n h_match = re.search(r'h([1-6])', tag)\n if h_match:\n head_level, = h_match.groups()\n tag = 'h%i' % max(1, min(int(head_level) + head_offset, 6))\n o1, o2, content, c2, c1, eat = self.fBlock(tag, atts, ext,\n cite, graf)\n # leave off c1 if this block is extended,\n # we'll close it at the start of the next block\n\n if ext:\n line = \"%s%s%s%s\" % (o1, o2, content, c2)\n else:\n line = \"%s%s%s%s%s\" % (o1, o2, content, c2, c1)\n\n else:\n anon = True\n if ext or not re.search(r'^\\s', line):\n o1, o2, content, c2, c1, eat = self.fBlock(tag, atts, ext,\n cite, line)\n # skip $o1/$c1 because this is part of a continuing\n # extended block\n if tag == 'p' and not self.hasRawText(content):\n line = content\n else:\n line = \"%s%s%s\" % (o2, content, c2)\n else:\n line = self.graf(line)\n\n line = self.doPBr(line)\n if self.html_type == 'xhtml':\n line = re.sub(r'<br>', '<br />', line)\n\n if self.html_type == 'html':\n line = re.sub(r'<br />', '<br>', line)\n\n if ext and anon:\n out.append(out.pop() + \"\\n\" + line)\n elif not eat:\n out.append(line)\n\n if not ext:\n tag = 'p'\n atts = ''\n cite = ''\n graf = ''\n\n if ext:\n out.append(out.pop() + c1)\n return '\\n\\n'.join(out)", "def _do_links(self, text):\r\n MAX_LINK_TEXT_SENTINEL = 3000 # markdown2 issue 24\r\n\r\n # `anchor_allowed_pos` is used to support img links inside\r\n # anchors, but not anchors inside anchors. An anchor's start\r\n # pos must be `>= anchor_allowed_pos`.\r\n anchor_allowed_pos = 0\r\n\r\n curr_pos = 0\r\n while True: # Handle the next link.\r\n # The next '[' is the start of:\r\n # - an inline anchor: [text](url \"title\")\r\n # - a reference anchor: [text][id]\r\n # - an inline img: ![text](url \"title\")\r\n # - a reference img: ![text][id]\r\n # - a footnote ref: [^id]\r\n # (Only if 'footnotes' extra enabled)\r\n # - a footnote defn: [^id]: ...\r\n # (Only if 'footnotes' extra enabled) These have already\r\n # been stripped in _strip_footnote_definitions() so no\r\n # need to watch for them.\r\n # - a link definition: [id]: url \"title\"\r\n # These have already been stripped in\r\n # _strip_link_definitions() so no need to watch for them.\r\n # - not markup: [...anything else...\r\n try:\r\n start_idx = text.index('[', curr_pos)\r\n except ValueError:\r\n break\r\n text_length = len(text)\r\n\r\n # Find the matching closing ']'.\r\n # Markdown.pl allows *matching* brackets in link text so we\r\n # will here too. 
Markdown.pl *doesn't* currently allow\r\n # matching brackets in img alt text -- we'll differ in that\r\n # regard.\r\n bracket_depth = 0\r\n for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL,\r\n text_length)):\r\n ch = text[p]\r\n if ch == ']':\r\n bracket_depth -= 1\r\n if bracket_depth < 0:\r\n break\r\n elif ch == '[':\r\n bracket_depth += 1\r\n else:\r\n # Closing bracket not found within sentinel length.\r\n # This isn't markup.\r\n curr_pos = start_idx + 1\r\n continue\r\n link_text = text[start_idx+1:p]\r\n\r\n # Possibly a footnote ref?\r\n if \"footnotes\" in self.extras and link_text.startswith(\"^\"):\r\n normed_id = re.sub(r'\\W', '-', link_text[1:])\r\n if normed_id in self.footnotes:\r\n self.footnote_ids.append(normed_id)\r\n result = '<sup class=\"footnote-ref\" id=\"fnref-%s\">' \\\r\n '<a href=\"#fn-%s\">%s</a></sup>' \\\r\n % (normed_id, normed_id, len(self.footnote_ids))\r\n text = text[:start_idx] + result + text[p+1:]\r\n else:\r\n # This id isn't defined, leave the markup alone.\r\n curr_pos = p+1\r\n continue\r\n\r\n # Now determine what this is by the remainder.\r\n p += 1\r\n if p == text_length:\r\n return text\r\n\r\n # Inline anchor or img?\r\n if text[p] == '(': # attempt at perf improvement\r\n match = self._tail_of_inline_link_re.match(text, p)\r\n if match:\r\n # Handle an inline anchor or img.\r\n is_img = start_idx > 0 and text[start_idx-1] == \"!\"\r\n if is_img:\r\n start_idx -= 1\r\n\r\n url, title = match.group(\"url\"), match.group(\"title\")\r\n if url and url[0] == '<':\r\n url = url[1:-1] # '<url>' -> 'url'\r\n # We've got to encode these to avoid conflicting\r\n # with italics/bold.\r\n url = url.replace('*', self._escape_table['*']) \\\r\n .replace('_', self._escape_table['_'])\r\n if title:\r\n title_str = ' title=\"%s\"' % (\r\n _xml_escape_attr(title)\r\n .replace('*', self._escape_table['*'])\r\n .replace('_', self._escape_table['_']))\r\n else:\r\n title_str = ''\r\n if is_img:\r\n result = '<img src=\"%s\" alt=\"%s\"%s%s' \\\r\n % (url.replace('\"', '&quot;'),\r\n _xml_escape_attr(link_text),\r\n title_str, self.empty_element_suffix)\r\n if \"smarty-pants\" in self.extras:\r\n result = result.replace('\"', self._escape_table['\"'])\r\n curr_pos = start_idx + len(result)\r\n text = text[:start_idx] + result + text[match.end():]\r\n elif start_idx >= anchor_allowed_pos:\r\n result_head = '<a href=\"%s\"%s>' % (url, title_str)\r\n result = '%s%s</a>' % (result_head, link_text)\r\n if \"smarty-pants\" in self.extras:\r\n result = result.replace('\"', self._escape_table['\"'])\r\n # <img> allowed from curr_pos on, <a> from\r\n # anchor_allowed_pos on.\r\n curr_pos = start_idx + len(result_head)\r\n anchor_allowed_pos = start_idx + len(result)\r\n text = text[:start_idx] + result + text[match.end():]\r\n else:\r\n # Anchor not allowed here.\r\n curr_pos = start_idx + 1\r\n continue\r\n\r\n # Reference anchor or img?\r\n else:\r\n match = self._tail_of_reference_link_re.match(text, p)\r\n if match:\r\n # Handle a reference-style anchor or img.\r\n is_img = start_idx > 0 and text[start_idx-1] == \"!\"\r\n if is_img:\r\n start_idx -= 1\r\n link_id = match.group(\"id\").lower()\r\n if not link_id:\r\n link_id = link_text.lower() # for links like [this][]\r\n if link_id in self.urls:\r\n url = self.urls[link_id]\r\n # We've got to encode these to avoid conflicting\r\n # with italics/bold.\r\n url = url.replace('*', self._escape_table['*']) \\\r\n .replace('_', self._escape_table['_'])\r\n title = self.titles.get(link_id)\r\n if 
title:\r\n before = title\r\n title = _xml_escape_attr(title) \\\r\n .replace('*', self._escape_table['*']) \\\r\n .replace('_', self._escape_table['_'])\r\n title_str = ' title=\"%s\"' % title\r\n else:\r\n title_str = ''\r\n if is_img:\r\n result = '<img src=\"%s\" alt=\"%s\"%s%s' \\\r\n % (url.replace('\"', '&quot;'),\r\n link_text.replace('\"', '&quot;'),\r\n title_str, self.empty_element_suffix)\r\n if \"smarty-pants\" in self.extras:\r\n result = result.replace('\"', self._escape_table['\"'])\r\n curr_pos = start_idx + len(result)\r\n text = text[:start_idx] + result + text[match.end():]\r\n elif start_idx >= anchor_allowed_pos:\r\n result = '<a href=\"%s\"%s>%s</a>' \\\r\n % (url, title_str, link_text)\r\n result_head = '<a href=\"%s\"%s>' % (url, title_str)\r\n result = '%s%s</a>' % (result_head, link_text)\r\n if \"smarty-pants\" in self.extras:\r\n result = result.replace('\"', self._escape_table['\"'])\r\n # <img> allowed from curr_pos on, <a> from\r\n # anchor_allowed_pos on.\r\n curr_pos = start_idx + len(result_head)\r\n anchor_allowed_pos = start_idx + len(result)\r\n text = text[:start_idx] + result + text[match.end():]\r\n else:\r\n # Anchor not allowed here.\r\n curr_pos = start_idx + 1\r\n else:\r\n # This id isn't defined, leave the markup alone.\r\n curr_pos = match.end()\r\n continue\r\n\r\n # Otherwise, it isn't markup.\r\n curr_pos = start_idx + 1\r\n\r\n return text" ]
[ "0.5886465", "0.58725065", "0.58705807", "0.57472676", "0.56360775", "0.55975014", "0.5573705", "0.5540514", "0.54371405", "0.54029816", "0.53338504", "0.53178537", "0.5315798", "0.5282773", "0.52491987", "0.52362645", "0.5195637", "0.5168886", "0.513893", "0.51377296", "0.5118753", "0.5088631", "0.5075016", "0.5071764", "0.5047268", "0.5017047", "0.5005871", "0.49910498", "0.49874976", "0.4936778" ]
0.6464983
0
Material saver. Saves the material and its properties to the JSON file for type building elements. If the Project parent is set, it automatically saves it to the file given in Project.data. Alternatively, you can specify a path to a file with Materials. If this file does not exist, a new file is created.
def save_material(material, data_class): data_class.material_bind["version"] = "0.7" add_to_json = True warning_text = ("Material with same name and same properties already " "exists in JSON, consider this material or revising your " "properties") for id, check in data_class.material_bind.items(): if id != "version": if check["name"] == material.name and \ check["density"] == material.density and \ check["thermal_conduc"] == material.thermal_conduc and \ check["heat_capac"] == material.heat_capac and \ check[ "thickness_default"] == material.thickness_default and \ check["thickness_list"] == material.thickness_list: warnings.warn(warning_text) print(material.name) add_to_json = False break if add_to_json is True: data_class.material_bind[ material.material_id] = collections.OrderedDict() data_class.material_bind[ material.material_id]["name"] = material.name data_class.material_bind[ material.material_id]["density"] = material.density data_class.material_bind[ material.material_id]["thermal_conduc"] = material.thermal_conduc data_class.material_bind[ material.material_id]["heat_capac"] = material.heat_capac data_class.material_bind[ material.material_id][ "thickness_default"] = material.thickness_default data_class.material_bind[ material.material_id]["thickness_list"] = material.thickness_list data_class.material_bind[ material.material_id]["solar_absorp"] = material.solar_absorp with open(utilities.get_full_path(data_class.path_mat), 'w') as file: file.write(json.dumps( data_class.material_bind, indent=4, separators=(',', ': ')))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_material(filename, mat):\n out = np.array([mat.wav, mat.eps.real, mat.eps.imag,\n mat.mu.real, mat.mu.imag]).T\n header = \"Wavelength\\teps_real\\teps_imag\\tmu_real\\tmu_imag\"\n miepy.array_io.save(filename, out, header=header)", "def WriteStructuralMaterialsjson(save_path,dic_in_json_format):\n complete_name=os.path.join(save_path,\"StructuralMaterials.json\") \n with open(complete_name, \"w\") as save_file:\n save_file.write(dic_in_json_format)\n if(DEBUG):\n print(\"StructuralMaterials.json written\")", "def save_and_reload_scene():\n\n flg = logging.getLogger(\"lettuce.xgenSetup.save_and_reload_scene\")\n\n current_file = mc.file(save=True)\n flg.info(\"Current File: {}\".format(current_file))\n mc.file(current_file, ignoreVersion=True, open=True, force=True)", "def export_material(self, bo, bm):\n\n # Sometimes, a material might need to be single-use. Right now, the most apparent example\n # of that situation is when a lightmap image is baked. Wavesets are in the same boat, but\n # that's a special case as of the writing of this code.\n single_user = self._requires_single_user_material(bo, bm)\n if single_user:\n mat_name = \"{}_AutoSingle\".format(bm.name) if bo.name == bm.name else \"{}_{}\".format(bo.name, bm.name)\n self._report.msg(\"Exporting Material '{}' as single user '{}'\", bm.name, mat_name, indent=1)\n hgmat = None\n else:\n mat_name = bm.name\n self._report.msg(\"Exporting Material '{}'\", mat_name, indent=1)\n hsgmat = self._mgr.find_key(hsGMaterial, name=mat_name, bl=bo)\n if hsgmat is not None:\n return hsgmat\n\n hsgmat = self._mgr.add_object(hsGMaterial, name=mat_name, bl=bo)\n slots = [(idx, slot) for idx, slot in enumerate(bm.texture_slots) if self._can_export_texslot(slot)]\n\n # There is a major difference in how Blender and Plasma handle stencils.\n # In Blender, the stencil is on top and applies to every layer below is. In Plasma, the stencil\n # is below the SINGLE layer it affects. The main texture is marked BindNext and RestartPassHere.\n # The pipeline indicates that we can render 8 layers simultaneously, so we will collect all\n # stencils and apply this arrangement. We're going to limit to 6 stencils however. 1 layer for\n # main texture and 1 piggyback.\n num_stencils = sum((1 for i in slots if i[1].use_stencil))\n if num_stencils > _MAX_STENCILS:\n raise ExportError(\"Material '{}' uses too many stencils. 
The maximum is {}\".format(bm.name, _MAX_STENCILS))\n stencils = []\n restart_pass_next = False\n\n # Loop over layers\n for idx, slot in slots:\n # Prepend any BumpMapping magic layers\n if slot.use_map_normal:\n if bo in self._bump_mats:\n raise ExportError(\"Material '{}' has more than one bumpmap layer\".format(bm.name))\n du, dw, dv = self.export_bumpmap_slot(bo, bm, hsgmat, slot, idx)\n hsgmat.addLayer(du.key) # Du\n hsgmat.addLayer(dw.key) # Dw\n hsgmat.addLayer(dv.key) # Dv\n\n if slot.use_stencil:\n stencils.append((idx, slot))\n else:\n tex_layer = self.export_texture_slot(bo, bm, hsgmat, slot, idx)\n if restart_pass_next:\n tex_layer.state.miscFlags |= hsGMatState.kMiscRestartPassHere\n restart_pass_next = False\n hsgmat.addLayer(tex_layer.key)\n if slot.use_map_normal:\n self._bump_mats[bo] = (tex_layer.UVWSrc, tex_layer.transform)\n # After a bumpmap layer(s), the next layer *must* be in a\n # new pass, otherwise it gets added in non-intuitive ways\n restart_pass_next = True\n if stencils:\n tex_state = tex_layer.state\n if not tex_state.blendFlags & hsGMatState.kBlendMask:\n tex_state.blendFlags |= hsGMatState.kBlendAlpha\n tex_state.miscFlags |= hsGMatState.kMiscRestartPassHere | hsGMatState.kMiscBindNext\n curr_stencils = len(stencils)\n for i in range(curr_stencils):\n stencil_idx, stencil = stencils[i]\n stencil_name = \"STENCILGEN_{}@{}_{}\".format(stencil.name, bm.name, slot.name)\n stencil_layer = self.export_texture_slot(bo, bm, hsgmat, stencil, stencil_idx, name=stencil_name)\n if i+1 < curr_stencils:\n stencil_layer.state.miscFlags |= hsGMatState.kMiscBindNext\n hsgmat.addLayer(stencil_layer.key)\n\n # Plasma makes several assumptions that every hsGMaterial has at least one layer. If this\n # material had no Textures, we will need to initialize a default layer\n if not hsgmat.layers:\n layer = self._mgr.find_create_object(plLayer, name=\"{}_AutoLayer\".format(bm.name), bl=bo)\n self._propagate_material_settings(bm, layer)\n hsgmat.addLayer(layer.key)\n\n # Cache this material for later\n mat_list = self._obj2mat.setdefault(bo, [])\n mat_list.append(hsgmat.key)\n\n # Looks like we're done...\n return hsgmat.key", "def saveCallback(self):\n\n ## TODO // TEST IT\n self._pathsDict[\"sceneFile\"] = self.getSceneFile()\n try:\n openSceneInfo = self.getOpenSceneInfo()\n if not openSceneInfo:\n return\n except TypeError:\n return\n if openSceneInfo[\"jsonFile\"]:\n jsonInfo = self._loadJson(openSceneInfo[\"jsonFile\"])\n if jsonInfo[\"ReferenceFile\"]:\n absRefFile = os.path.join(self._pathsDict[\"projectDir\"], jsonInfo[\"ReferenceFile\"])\n # TODO : ref => Dict\n absBaseSceneVersion = os.path.join(self._pathsDict[\"projectDir\"], jsonInfo[\"Versions\"][int(jsonInfo[\"ReferencedVersion\"]) - 1][\"RelativePath\"])\n # if the refererenced scene file is the saved file (saved or saved as)\n if self._pathsDict[\"sceneFile\"] == absBaseSceneVersion:\n # copy over the forReference file\n try:\n shutil.copyfile(self._pathsDict[\"sceneFile\"], absRefFile)\n print \"Scene Manager Update:\\nReference File Updated\"\n except:\n pass", "def menu_save_scene(self):\n file_name = QtGui.QFileDialog().getSaveFileName(self, \"Save Scene to File\", get_data_path(), \"*.pkl\")\n with open(file_name, \"wb\") as f:\n pickle.dump(self.scene, f, pickle.HIGHEST_PROTOCOL)", "def save(self, filename=None):\n if filename is None:\n filename = \"morse_smale_complex.json\"\n with open(filename, \"w\") as fp:\n fp.write(self.to_json())", "def savemat(self, file_name, mdict=None, appendmat=True, 
**kwargs):\n # Set mdict default value to empty dictionary\n if mdict is None:\n mdict = {}\n\n # Merge mdict with attributes dictionary, giving mdict the upper-hand\n # in case of inconsistency\n dsavemat = {**vars(self), **mdict}\n\n # Save the merged dictionary to a .mat file\n scipy.io.savemat(file_name, dsavemat, appendmat, **kwargs)", "def AssembleStructuralMaterialsJson(KratosWindowManager):\n for key in KratosWindowManager.MatSave.keys():\n if(DEBUG):\n print(key)\n print(type(KratosWindowManager.MatSave[key]))\n sm.structuralmaterials_dict[\"properties\"][0][\"Material\"][\"Variables\"][key]=KratosWindowManager.MatSave[key]\n for bclistobject in KratosWindowManager.boundaryConditionEditor:\n if(DEBUG):\n print(bclistobject.name)\n if bclistobject.entityType=='Element':\n sm.structuralmaterials_dict[\"properties\"][0][\"model_part_name\"]=bclistobject.name\n\n\n if KratosWindowManager.is2D:\n sm.structuralmaterials_dict[\"properties\"][0][\"Material\"][\"constitutive_law\"][\"name\"]=\"KratosMultiphysics.StructuralMechanicsApplication.LinearElasticPlaneStrain2DLaw\"\n else:\n sm.structuralmaterials_dict[\"properties\"][0][\"Material\"][\"constitutive_law\"][\"name\"]=\"KratosMultiphysics.StructuralMechanicsApplication.LinearElastic3DLaw\"\n \n\n if(DEBUG):\n print(sm.structuralmaterials_dict)\n return sm.WriteMaterialToJson(sm.structuralmaterials_dict)", "def test_save_materials(temp_dir):\n image1 = [[[0, 0, 0], [0, 0, 0]], [[255, 255, 255], [255, 255, 255]]]\n image2 = [[[0, 0, 0], [255, 255, 255]], [[255, 255, 255], [0, 0, 0]]]\n image3 = [[[255, 255, 255], [255, 255, 255]], [[0, 0, 0], [0, 0, 0]]]\n\n data = [\n (\"image1.png\", Image.fromarray(np.array(image1, dtype=np.uint8))),\n (\"image2.png\", Image.fromarray(np.array(image2, dtype=np.uint8))),\n (\"image3.png\", Image.fromarray(np.array(image3, dtype=np.uint8))),\n ]\n save_materials(temp_dir, data, step=1)\n\n assert os.path.exists(os.path.join(temp_dir, \"images\", \"1\", \"image1.png\"))\n assert os.path.exists(os.path.join(temp_dir, \"images\", \"1\", \"image2.png\"))\n assert os.path.exists(os.path.join(temp_dir, \"images\", \"1\", \"image3.png\"))", "def read_material_data(self, material):\n material_yaml_file = glob.glob(os.path.join(material_dir, material + '.yaml'))\n\n inputs = utilities.yaml_reader(material_yaml_file, material_dir, material)\n self.name = inputs['Name']\n self.materialName = material\n self.elements = inputs['Elements']\n self.zaids = inputs['Elemental ZAIDs']\n self.weightFraction = inputs['Elemental Weight Fractions'] if 'Elemental Weight Fractions' in inputs else []\n self.enrichmentZaids = inputs['Elemental Adjustment ZAIDs'] if 'Elemental Adjustment ZAIDs' in inputs else []\n self.enrichmentIsotopes = inputs['Isotopic Adjustment ZAIDs'] if 'Isotopic Adjustment ZAIDs' in inputs else []\n self.enrichmentVector = inputs['Isotopic Weight Percents'] if 'Isotopic Weight Percents' in inputs else []\n self.isotopicAtomPercents = inputs['Isotopic Atom Percents'] if 'Isotopic Atom Percents' in inputs else []\n self.density = inputs['Density']\n self.linearCoeffExpansion = inputs['Linear Coefficient of Expansion']", "def save_meta(self):\n # jOut = os.path.join(self.meta[\"wdir\"], meta_file)\n with open(self.meta_filepath, \"w\") as f:\n json.dump(self.meta, f)", "def write_savefile(state: PhysicsState, file: Path):\n if file.suffix.lower() != '.json':\n # Ensure a .json suffix.\n file = file.parent / (file.name + '.json')\n log.info(f'Saving to savefile {file.resolve()}')\n\n savefile_json_dict = 
google.protobuf.json_format.MessageToDict(\n state.as_proto(),\n including_default_value_fields=False,\n preserving_proto_field_name=True,\n use_integers_for_enums=False,\n )\n\n for i, component in enumerate(savefile_json_dict['engineering']['components']):\n component['name'] = strings.COMPONENT_NAMES[i]\n\n with open(file, 'w') as outfile:\n json.dump(savefile_json_dict, outfile, indent=2)\n\n return file", "def set_material(properties,object,finish,normal):\n if object not in properties:\n properties[object.getName()]={}\n properties[object.getName()][\"finish\"]=finish\n properties[object.getName()][\"normal\"]=normal", "def write_saver_defs(self):\n assert self.savers_constructed\n full_saver_def = self.full_saver.as_saver_def()\n full_file = self.params.save_dir+self.params.model_name+\"_v\"+self.params.version+\".def\"\n with open(full_file, \"wb\") as f:\n f.write(full_saver_def.SerializeToString())\n self.logger.log_info(\"Full saver def saved in file %s\"%full_file)", "def save(self):\n for name, param in self.components.items():\n param_path = os.path.join(self.model_path, \"%s.mat\" % name)\n if hasattr(param, 'params'):\n param_values = {p.name: p.get_value() for p in param.params}\n else:\n param_values = {name: param.get_value()}\n scipy.io.savemat(param_path, param_values)", "def write_mat_file(self):\n mat_dict = {}\n mat_dict['Lx_p'] = self.Lx_p\n mat_dict['Ly_p'] = self.Ly_p\n mat_dict['Lz_p'] = self.Lz_p\n mat_dict['Lo'] = self.obst.get_Lo()\n mat_dict['Ny_divs'] = self.N_divs\n mat_dict['rho_p'] = self.rho_p\n mat_dict['nu_p'] = self.nu_p\n mat_dict['snl'] = list(np.union1d(self.obst_list[:],self.solid_list[:]))\n mat_dict['inl'] = list(self.inlet_list[:])\n mat_dict['onl'] = list(self.outlet_list[:])\n\n scipy.io.savemat('geometry_description',mat_dict)", "def make_settings():\n settings = {}\n num_of_rocks = 1\n\n obj = json.load(open('assets/add_mesh_rocks.json'))\n presets = [obj[\"settings\"][\"default\"]] + obj[\"settings\"][\"preset\"]\n\n for preset in presets:\n title = preset[\"title\"]\n # SIZE\n size = preset[\"size\"]\n\n x, y, z = size[\"scale\"]\n if title == \"Default\":\n scale = uniform(float(x[\"lower\"]), float(x[\"upper\"]))\n scale_X = [scale, scale]\n scale_Y = [scale, scale]\n scale_Z = [scale, scale]\n else:\n scale_X = [float(x[\"lower\"]), float(x[\"upper\"])]\n scale_Y = [float(y[\"lower\"]), float(y[\"upper\"])]\n scale_Z = [float(z[\"lower\"]), float(z[\"upper\"])]\n\n x, y, z = size[\"skew\"]\n skew_X = float(x[\"value\"])\n skew_Y = float(y[\"value\"])\n skew_Z = float(z[\"value\"])\n\n scale_fac = ast.literal_eval(size[\"scale_fac\"])\n use_scale_dis = bool(size[\"use_scale_dis\"])\n\n # SHAPE\n shape = preset[\"shape\"]\n\n deform = float(shape[\"deform\"])\n rough = float(shape[\"rough\"])\n detail = float(shape[\"detail\"])\n display_detail = float(shape[\"display_detail\"])\n smooth_fac = float(shape[\"smooth_fac\"])\n smooth_it = float(shape[\"smooth_it\"])\n\n\n # MATERIAL\n material = preset[\"material\"]\n \n mat_enable = bool(material[\"mat_enable\"])\n mat_color = ast.literal_eval(material[\"mat_color\"])\n mat_bright = float(material[\"mat_bright\"])\n mat_rough = float(material[\"mat_rough\"])\n mat_spec = float(material[\"mat_spec\"])\n mat_hard = float(material[\"mat_hard\"])\n mat_use_trans = bool(material[\"mat_use_trans\"])\n mat_alpha = float(material[\"mat_alpha\"])\n mat_cloudy = float(material[\"mat_cloudy\"])\n mat_IOR = float(material[\"mat_IOR\"])\n mat_mossy = float(material[\"mat_mossy\"])\n\n # RANDOM\n 
random = preset[\"random\"]\n\n use_generate = bool(random[\"use_generate\"])\n use_random_seed = bool(random[\"use_random_seed\"])\n user_seed = float(random[\"user_seed\"])\n\n\n settings[title] = [\n context,\n scale_X,\n skew_X,\n scale_Y,\n skew_Y,\n scale_Z,\n skew_Z,\n scale_fac,\n detail,\n display_detail,\n deform,\n rough,\n smooth_fac,\n smooth_it,\n mat_enable,\n mat_color,\n mat_bright,\n mat_rough,\n mat_spec,\n mat_hard,\n mat_use_trans,\n mat_alpha,\n mat_cloudy,\n mat_IOR,\n mat_mossy,\n num_of_rocks,\n user_seed,\n False,\n use_random_seed\n ]\n\n return settings", "def _save(self):\n\n out_dict = {}\n out_dict[\"version\"] = pyfx.__version__\n out_dict[\"name\"] = self._name\n out_dict[\"src\"] = self._src\n\n # Write out the background file as an image\n bg_file = os.path.join(self._name,\"master_bg_image.png\")\n pyfx.util.to_file(self._bg_frame,bg_file)\n out_dict[\"bg_frame\"] = bg_file\n\n f = open(os.path.join(self._name,\"pyfx.json\"),\"w\")\n json.dump(out_dict,f)\n f.close()", "def append_material(self, material):\n # First check if asset attribute exists; if not, define the asset attribute\n if not hasattr(self, \"asset\"):\n self.asset = ET.Element(\"asset\")\n # If the material name is not in shared materials, add this to our assets\n if material.name not in self.shared_materials:\n self.asset.append(ET.Element(\"texture\", attrib=material.tex_attrib))\n self.asset.append(ET.Element(\"material\", attrib=material.mat_attrib))\n # Add this material name to shared materials if it should be shared\n if material.shared:\n self.shared_materials.add(material.name)\n self.shared_textures.add(material.tex_attrib[\"name\"])\n # Update prefix for assets\n add_prefix(root=self.asset, prefix=self.naming_prefix, exclude=self.exclude_from_prefixing)", "def build_storage(self, mine, planet=None):\n self.send_build_post(\"resources\", planet, codes.storage[mine])", "def save(self):\n if self.loaded:\n list_embeddingNames = [self.embeddings.vsm_name, self.synset_embeddings.vsm_name, self.imagined_embeddings.vsm_name]\n full_file_name = self.resource_manager.get_multimodal_dataset(self.corpus, list_embeddingNames)\n logging.info('Saving dataset to [%s]', full_file_name)\n with lzma.open(full_file_name, 'wb') as f:\n pickle.dump(self, f)\n else:\n logging.error('Dataset not loaded, call \"build\" method first!')", "def save(self):\n memento = self.create_memento()\n import datetime\n f = open(str(datetime.datetime.now()).replace(' ','_')+'.saved_story','w')\n cPickle.dump(memento,f)\n f.close()\n zcanvas.message(\"Saved!\")", "def save(self, path=''):\n if not self.__isBuilt:\n self._rebuild()\n if not path:\n self.w.save(self.path)\n else:\n if not path.endswith('.shp'):\n path = os.path.splitext(path)[0] + '.shp'\n self.w.save(path)", "def serialize(file):\n global root_dir\n global wells_list\n global tops_list\n global project_file\n project_file = file\n\n current_project = Project(root_dir, wells_list, tops_list, file)\n f = open(file, 'wb')\n pickle.dump(current_project, f)", "def persist(self) -> None:\n logger.info('Generating or Updating meta data file {}'.format(self.file_path))\n with open(self.file_path, 'w', encoding='utf-8') as meta_file:\n meta_file.write(json.dumps(self, default=lambda value: value.__dict__))", "def create_blender_material(self, ogremat, mat, meshId, matIdx):\n logger.debug(\"create_blender_material\")\n textures = ogremat.textures\n bmat = None\n idx = 0\n mat_name = mat[\"name\"].split(\"/\")[0]\n try:\n bmat = bpy.data.materials[mat_name]\n 
if bversion == 3:\n bmat.name = \"tobedeleted\"\n bmat = bpy.data.materials.new(mat_name)\n except:\n bmat = bpy.data.materials.new(mat_name)\n self.set_uuid(bmat, ogremat.uuid)\n # material base properties\n if ogremat.doambient:\n if bversion == 2:\n bmat.setAmb(ogremat.ambient)\n else:\n bmat.ambient = ogremat.ambient\n if ogremat.specular:\n if bversion == 2:\n bmat.setSpec(1.0)\n bmat.setSpecCol(ogremat.specular[:3])\n bmat.setHardness(int(ogremat.specular[3]*4.0))\n else:\n bmat.specular_intensity = 1.0\n ogremat.specular[:3]\n bmat.specular_color = ogremat.specular[:3]\n bmat.specular_hardness = int(ogremat.specular[3]*4.0)\n if ogremat.alpha < 1.0:\n bmat.alpha = ogremat.alpha\n # specular\n for layerName, textureId in ogremat.layers.items():\n if layerName == 'shadowMap':\n if bversion == 2:\n bmat.setMode(Blender.Material.Modes['SHADOWBUF'] & bmat.getMode())\n else:\n bmat.use_cast_buffer_shadows = True\n if textureId:\n textureId = textureId\n pars = (bmat, layerName, mat[\"name\"], ogremat, idx, meshId,\n matIdx)\n if textureId in self._imported_assets:\n btex = self._imported_assets[textureId]\n self.layer_ready(btex, *pars)\n elif self.simrt:\n pars = (textureId,) + pars\n if not self.Asset.downloadAsset(textureId, 0,\n self.texture_downloaded, \n pars,\n main=self.doTextureDownloadTranscode):\n self.add_texture_callback(textureId, self.layer_ready, pars[1:])\n idx += 1\n self._imported_materials[mat[\"name\"]] = bmat\n return bmat", "def saveSettings(self):\n self.genFiles.applyData()\n self.genGraph.applyData()", "def __render_material_preview(self, scene):\n\n # Don't render material thumbnails.\n (width, height) = util.get_render_resolution(scene)\n if width <= 96:\n return\n\n # Collect objects and their materials in a object -> [materials] dictionary.\n objects_materials = {}\n for obj in (obj for obj in scene.objects if obj.is_visible(scene) and not obj.hide_render):\n for mat in util.get_instance_materials(obj):\n if mat is not None:\n if obj.name not in objects_materials.keys():\n objects_materials[obj] = []\n objects_materials[obj].append(mat)\n\n # Find objects that are likely to be the preview objects.\n preview_objects = [o for o in objects_materials.keys() if o.name.startswith('preview')]\n if not preview_objects:\n return\n\n # Find the materials attached to the likely preview object.\n likely_materials = objects_materials[preview_objects[0]]\n if not likely_materials:\n return\n\n # Build the path to the output preview project.\n preview_output_dir = os.path.join(tempfile.gettempdir(), \"blenderseed\", \"material_preview\")\n preview_project_filepath = os.path.join(preview_output_dir, \"material_preview.appleseed\")\n\n # Create target directories if necessary.\n if not os.path.exists(preview_output_dir):\n try:\n os.makedirs(preview_output_dir)\n except os.error:\n self.report({\"ERROR\"}, \"The directory {0} could not be created. 
Check directory permissions.\".format(preview_output_dir))\n return\n\n # Copy assets from template project to output directory.\n preview_template_dir = os.path.join(os.sep.join(util.realpath(__file__).split(os.sep)[:-1]), \"mat_preview\")\n existing_files = os.listdir(preview_output_dir)\n for item in os.listdir(preview_template_dir):\n if item not in existing_files:\n copyfile(os.path.join(preview_template_dir, item), os.path.join(preview_output_dir, item))\n\n prev_mat = likely_materials[0]\n prev_type = prev_mat.preview_render_type.lower()\n\n # Export the project.\n writer = projectwriter.Writer()\n file_written = writer.export_preview(scene,\n preview_project_filepath,\n prev_mat,\n prev_type,\n width,\n height)\n if not file_written:\n print('Error while exporting. Check the console for details.')\n return\n\n # Render the project.\n self.__render_project_file(scene, preview_project_filepath)", "def SaveJSON(self, filename):\n data = {\n 'files': self._files,\n 'ebuilds': self._ebuilds,\n }\n json.dump(data, open(filename, 'w'))" ]
[ "0.57051945", "0.55286336", "0.54615587", "0.53826904", "0.5350502", "0.53496337", "0.5319556", "0.52859074", "0.5285472", "0.52815086", "0.5229431", "0.5202984", "0.5185666", "0.5141686", "0.5140259", "0.513041", "0.51060146", "0.5105606", "0.50921017", "0.5083165", "0.5072597", "0.50634295", "0.50603575", "0.50601643", "0.5031712", "0.5016729", "0.4997566", "0.4988507", "0.49884725", "0.4957583" ]
0.65841603
0
Create a new Settings, reading from a default location for the given domain (~/Library/Preferences/%s.plist).
def __init__(self, domain='com.markfickett.gors'): settingsDir = os.path.expanduser(self.__SETTINGS_DIR) if not os.path.isdir(settingsDir): os.makedirs(settingsDir) self.__settingsFileName = os.path.join(settingsDir, domain + '.plist') if os.path.isfile(self.__settingsFileName): self.__settings = plistlib.readPlist( self.__settingsFileName) else: self.clear() self.__currentGroupNames = []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def settings_create(ctx):\n # Choose where and whether to save the configuration file.\n path = ctx.obj['load_path']\n if path:\n click.confirm(\n 'A settings file already exists. Continuing will override it. '\n 'Do you want to continue?',\n abort=True,\n )\n else:\n path = ctx.obj['save_path']\n\n # Get information about Pulp.\n pulp_config = {'pulp': _get_pulp_properties()}\n pulp_config['hosts'] = [\n _get_host_properties(pulp_config['pulp']['version'])\n ]\n pulp_config['pulp']['version'] = str(pulp_config['pulp']['version'])\n try:\n config.validate_config(pulp_config) # This should NEVER fail!\n except exceptions.ConfigValidationError:\n print(\n 'An internal error has occurred. Please report this to the Pulp '\n 'Smash developers at https://github.com/PulpQE/pulp-smash/issues',\n file=sys.stderr,\n )\n raise\n\n # Write the config to disk.\n with open(path, 'w') as handler:\n handler.write(json.dumps(pulp_config, indent=2, sort_keys=True))\n click.echo('Settings written to {}.'.format(path))", "def open_settings(location, show_defaults=False, settings_type=None, **kwargs):\n prefs = kwargs.get('prefs', settings.InternalSettings())\n if settings_type:\n settings_type = settings_type.lower()\n\n target_path = prefs.get_settings_path(location, settings_type)\n if not target_path.exists():\n dirname = target_path.parent\n makedirs(dirname, mode=0o775, exist_ok=True)\n with open(target_path, 'a', newline='\\n') as settings_file:\n settings_file.write('{}')\n\n if show_defaults:\n openable = [prefs.get_settings_path('default', settings_type, target_path.suffix), target_path]\n else:\n openable = [target_path]\n return result.Success(openable=openable)", "def load_settings_from_cli():\n load_user_from_cli()\n load_local_contacts()", "def loadSettings(home_dir,pd_dir):\n\n settingsXML = os.path.join(pd_dir,\"settings.xml\")\n\n #print(\"Loading settings from {0}\".format(settingsXML))\n\n global installationTree\n global installationSettings\n global domainPath\n global userEmail\n global userToken\n\n if os.path.isfile(settingsXML):\n installationTree = etree.parse(settingsXML)\n installationSettings = installationTree.getroot()\n\n for child in installationSettings:\n if child.tag == \"domain_path\":\n domainPath = child.text\n\n if not os.path.isdir(domainPath):\n fetchPlanningDomains(domainPath)\n\n if child.tag == \"email\":\n userEmail = child.text\n\n if child.tag == \"token\":\n userToken = child.text\n\n return\n\n if installationSettings is None:\n installationSettings = etree.Element(\"{http://settings.planning.domains}settings\")\n installationTree = etree.ElementTree(installationSettings)\n\n domainPath = input(\"Enter path for installing files (or hit enter to use {0}): \".format(os.path.join(home_dir,\"planning.domains\")))\n\n domainPath = domainPath.lstrip()\n domainpath = domainPath.rstrip()\n\n if domainPath == \"\":\n domainPath = os.path.join(home_dir,\"planning.domains\")\n\n if os.path.isfile(domainPath):\n print(\"Fatal error: there is already a file called {0}\".format(domainPath))\n exit(1)\n\n if not os.path.isdir(domainPath):\n fetchPlanningDomains(domainPath)\n\n etree.SubElement(installationSettings,\"domain_path\").text = domainPath\n\n userEmail = input(\"Enter email for API updates: \")\n userToken = input(\"Enter token for API updates (leave blank if none provided): \")\n\n etree.SubElement(installationSettings,\"email\").text = userEmail\n etree.SubElement(installationSettings,\"token\").text = userToken\n\n saveSettings()", "def 
set_by_domain(domain):\r\n if not has_configuration_set() or not domain:\r\n return\r\n\r\n for key, value in settings.MICROSITE_CONFIGURATION.items():\r\n subdomain = value.get('domain_prefix')\r\n if subdomain and domain.startswith(subdomain):\r\n _set_current_microsite(key, subdomain, domain)\r\n return\r\n\r\n # if no match on subdomain then see if there is a 'default' microsite defined\r\n # if so, then use that\r\n if 'default' in settings.MICROSITE_CONFIGURATION:\r\n _set_current_microsite('default', subdomain, domain)", "def make_pref_file():\r\n pref_dict = {\"default_user\": None}\r\n\r\n with open(os.path.join(os.path.dirname(__file__), \"preferences.json\"), \"w\") as pref:\r\n pref.write(json.dumps(pref_dict, indent=4))\r\n\r\n return pref_dict", "def create_domain(self, domain: str) -> Session:\n uri = f\"{self.uri}/domains\"\n data = {\n \"hostname\": domain\n }\n response = self.request(uri=uri, method=\"POST\", data=data)\n\n return response", "def create_default_settings():\n from flaskbb.fixtures.settings import fixture\n create_settings_from_fixture(fixture)", "def make_settings(pypirc):\n default_pypirc = \"\"\"\n [pypi]\n username:foo\n password:bar\n \"\"\"\n\n def _settings(pypirc_text=default_pypirc, **settings_kwargs):\n pypirc.write(textwrap.dedent(pypirc_text))\n\n settings_kwargs.setdefault(\"sign_with\", None)\n settings_kwargs.setdefault(\"config_file\", str(pypirc))\n\n return settings.Settings(**settings_kwargs)\n\n return _settings", "def load_settings(self):\n\n self.domains = []\n self.clear_settings()\n api_keys = self.api_key_instance.get_api_keys()\n if api_keys:\n for domain, api_key in list(api_keys.items()):\n self.domains.append(domain)\n getattr(self.dlg, \"uTextDomain{0}\".format(len(self.domains))).setText(\n domain\n )\n getattr(self.dlg, \"uTextAPIKey{0}\".format(len(self.domains))).setText(\n api_key\n )\n\n # Hide un-populated domain rows\n for entry in range(len(self.domains) + 1, 11):\n getattr(self.dlg, \"uTextDomain{0}\".format(entry)).hide()\n getattr(self.dlg, \"uTextAPIKey{0}\".format(entry)).hide()\n getattr(self.dlg, \"uBtnRemoveDomain{0}\".format(entry)).hide()\n getattr(self.dlg, \"uBtnSaveDomain{0}\".format(entry)).hide()", "def create_domain(DomainName=None):\n pass", "def get_domain_config():\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/domain\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def populate_domain_data(self, domain):\n self.domain_resolve(domain)\n domain_data = server.get_domain_data(domain)['data']['userdata']\n\n self.domain_data[domain] = self.domain_resolve(domain)\n\n if domain in self.domain_data.keys():\n try:\n self.domain_data[domain]['documentroot'] = domain_data['documentroot']\n self.domain_data[domain]['ip'] = domain_data['ip']\n except KeyError:\n self.domain_data[domain]['documentroot'] = \"No domain data found, admin should check\"\n self.domain_data[domain]['ip'] = \"No domain data found, admin should check\"", "def get_domain_config(self, defaultcfg, wireframe):\n\n dnsdata = dnslib.DNSRecord.parse(wireframe)\n dnsdomain = dnsdata.q.get_qname()\n\n for ruleset in globals.config.rules.match:\n if re.search(str(ruleset.domain), str(dnsdomain)):\n # domain config matches!\n return Box({**defaultcfg, **ruleset})\n\n return defaultcfg", "def __init__(__self__, *,\n domain: pulumi.Input[str]):\n pulumi.set(__self__, \"domain\", domain)", "def __init__(__self__, *,\n domain: pulumi.Input[str]):\n 
pulumi.set(__self__, \"domain\", domain)", "def loadSettings():\r\n try:\r\n settingsFile = open(sys.argv[1], \"r\")\r\n except IOError:\r\n logging.exception(\"Error opening settings.\")\r\n exitApp()\r\n \r\n settingStr = settingsFile.read()\r\n settingsFile.close()\r\n \r\n try:\r\n settings = json.loads(settingStr)\r\n except ValueError:\r\n logging.exception(\"Error parsing settings.\")\r\n exitApp()\r\n \r\n # Check integrity\r\n if (len(settings[\"reddit_username\"]) == 0):\r\n logging.critical(\"Reddit username not set.\")\r\n exitApp()\r\n \r\n if (len(settings[\"reddit_password\"]) == 0):\r\n logging.critical(\"Reddit password not set.\")\r\n exitApp()\r\n \r\n if (len(settings[\"reddit_subreddit\"]) == 0):\r\n logging.critical(\"Subreddit not set.\")\r\n exitApp()\r\n \r\n if (len(settings[\"reddit_ua\"]) == 0):\r\n logging.critical(\"Reddit bot user agent not set.\")\r\n exitApp()\r\n \r\n settings[\"repost_protection\"] = bool(settings[\"repost_protection\"])\r\n \r\n return settings", "def default_user_settings(self) -> pulumi.Input['DomainUserSettingsArgs']:\n return pulumi.get(self, \"default_user_settings\")", "def load_from_defaults(self):\n default_settings = import_module('mindinsight.conf.defaults')\n for setting in dir(default_settings):\n if setting.isupper():\n setattr(self, setting, getattr(default_settings, setting))\n self._default_settings.add(setting)", "def create_default_config():\n import codecs\n config = ConfigParser.SafeConfigParser()\n config.readfp(StringIO(DEFAULT_CONFIG))\n\n # Load user settings\n filename = get_user_config_filename()\n if not os.path.exists(filename):\n from wizard import setup_wizard\n setup_wizard(config)\n else:\n try:\n fi = codecs.open(filename, 'r', encoding='utf-8')\n config.readfp(fi)\n finally:\n fi.close()\n return config", "def generate_settings():\r\n conf_file = os.path.join(os.path.dirname(base_settings.__file__),\r\n 'example', 'conf.py')\r\n conf_template = open(conf_file).read()\r\n default_url = 'http://salmon.example.com'\r\n site_url = raw_input(\"What will be the URL for Salmon? 
[{0}]\".format(\r\n default_url))\r\n site_url = site_url or default_url\r\n secret_key = base64.b64encode(os.urandom(KEY_LENGTH))\r\n api_key = base64.b64encode(os.urandom(KEY_LENGTH))\r\n output = conf_template.format(api_key=api_key, secret_key=secret_key,\r\n site_url=site_url)\r\n return output", "def set_domain(domain):\n set_hosts(domain)\n click.echo(\n 'Host file was set: {} -> 127.0.0.1'.format(', '.join(domain))\n )", "def setup_domain(domain):\n bucket = BUCKET_MANAGER.get_bucket(domain)\n\n zone = DOMAIN_MANAGER.find_hosted_zone(domain) \\\n or DOMAIN_MANAGER.create_hosted_zone(domain)\n\n endpoint = util.get_endpoint(BUCKET_MANAGER.get_region_name(bucket))\n a_record = DOMAIN_MANAGER.create_s3_domain_record(zone, domain, endpoint)\n print(\"Domain configure: http://{}\".format(domain))\n print(\"A record created: {}\".format(a_record))", "def init_settings(self):\n if not os.path.exists(self.settingsFilePath):\n settings_dir = os.getenv(\"APPDATA\") + \"\\\\\" + qApp.applicationName()\n if not os.path.exists(settings_dir):\n os.makedirs(settings_dir)\n setting_path = \"\"\n if getattr(sys, 'frozen', False):\n setting_path = os.path.dirname(sys.executable)\n elif __file__:\n setting_path = os.path.dirname(__file__)\n shutil.copyfile(os.path.join(setting_path, \"resources\\eksettings.ini\"), self.settingsFilePath)\n return", "def findSettingsFile():\n settingsName = 'oct-fire-settings.json'\n userPath = os.path.expanduser('~')\n if os.path.exists(settingsName):\n return settingsName\n elif os.path.exists(os.path.join(userPath, settingsName)):\n return os.path.join(userPath, settingsName)\n elif os.path.exists(os.path.join(userPath, 'Desktop', settingsName)):\n return os.path.join(userPath, 'Desktop', settingsName)\n elif os.path.exists(os.path.join(userPath, 'Documents', settingsName)):\n return os.path.join(userPath, 'Documents', settingsName)\n elif os.path.exists(os.path.join(userPath, 'Downloads', settingsName)):\n return os.path.join(userPath, 'Downloads', settingsName)\n raise Exception('Could not locate settings file')", "def ini(filename, **defaults):\n filename = sh.path(filename)\n defaults.update(home=sh.path('~'))\n return ConfigObject(filename=filename, defaults=defaults)", "def __init__(self, settings, valid, defaults=None):\n\n try:\n with open(settings, 'r') as settings_file:\n self._settings = json.load(settings_file)\n except TypeError:\n self._settings = dict(settings)\n self._settings = Settings._inject_defaults(self._settings, defaults)\n Settings._validity_check(self._settings, valid)", "def getDefaultSettings(self) -> ghidra.docking.settings.Settings:\n ...", "def set_domain_path(self):\n\n self.domain_path = os.path.join(self.docs_path, self.domain)\n if not os.path.exists(self.domain_path):\n os.makedirs(self.domain_path)", "def settings() -> Settings:\n return Settings()" ]
[ "0.5657144", "0.548896", "0.5319334", "0.5285291", "0.52768016", "0.5255273", "0.5222265", "0.5218223", "0.5189833", "0.5173989", "0.5170184", "0.51397663", "0.513596", "0.5086622", "0.50810987", "0.50810987", "0.50761354", "0.5024908", "0.5006228", "0.5000491", "0.5000396", "0.49986142", "0.4996676", "0.49652752", "0.49533844", "0.49051794", "0.48797652", "0.4865534", "0.48620728", "0.48614177" ]
0.7057614
0
Get the full name of the given keyName under the current group. If extant is True and the key does not already exist, return None.
def __getKey(self, keyNameRaw, extant=True): fullKeyName = self.__DELIMITER.join( self.__currentGroupNames + [str(keyNameRaw)]) if extant and (fullKeyName not in self.__settings): return None return fullKeyName
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def key_name(self) -> Optional[str]:\n return pulumi.get(self, \"key_name\")", "def get_name(self):\n return self.key().name().split(':', 1)[1]", "def extract_key_name(self):\n # quick and dirty regex parsing..\n # consider using gnupg.\n _, out, _ = self.as_user('/usr/bin/gpg --list-keys')\n patterns = [\n 'pub\\s+.*?uid\\s+debrepo.*?sub\\s+\\w+/(\\w+)\\s+[\\w-]+$',\n '^pub.*?\\n\\s+(.*?)\\nuid',\n ]\n keyname = None\n out_str = out.decode('utf8')\n for pattern in patterns:\n m=re.search(pattern, out_str, flags=re.M|re.DOTALL)\n if m:\n keyname=m.group(1)\n break\n return keyname", "def kms_key_name(self) -> Optional[str]:\n return pulumi.get(self, \"kms_key_name\")", "def kms_key_name(self) -> Optional[str]:\n return pulumi.get(self, \"kms_key_name\")", "def get_post_extra_content_key_name(obj, key_name=None):\n if obj:\n key, name = key_name.split(',')\n return obj.get_extra_content().filter(key__iexact=key, name__iexact=name)\n return ''", "def key_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key_name\")", "def key_name(self) -> str:\n return pulumi.get(self, \"key_name\")", "def get_name(self):\n return m2.x509_extension_get_name(self.x509_ext)", "def kms_key_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"kms_key_name\")", "def kms_key_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"kms_key_name\")", "def _get_key_name(self, name):\n base_path = force_text(self.location)\n final_path = urljoin(base_path + \"/\", name)\n name = os.path.normpath(final_path.lstrip('/'))\n\n if six.PY2:\n name = name.encode('utf-8')\n return name", "def filename_for_key(self, key, extension=None):\n if extension is None:\n extension = self.file_extension\n f = self.key2basename(key) + extension\n return os.path.join(self.basepath, f)", "def actual_key(self, key):\n key_list = []\n if key.scope == Scope.children:\n key_list.append('children')\n elif key.scope == Scope.parent:\n key_list.append('parent')\n else:\n key_list.append([\"usage\", \"definition\", \"type\", \"all\"][key.scope.block])\n\n if key.block_scope_id is not None:\n key_list.append(key.block_scope_id)\n if key.student_id:\n key_list.append(key.student_id)\n return \".\".join(key_list)", "def GroupsExtension_getPackageName():\n return _libsbml.GroupsExtension_getPackageName()", "def getKey(self, namespace, ns_key):\n namespace = self._fixNS(namespace)\n if namespace == BARE_NS:\n return ns_key\n\n ns_alias = self.namespaces.getAlias(namespace)\n\n # No alias is defined, so no key can exist\n if ns_alias is None:\n return None\n\n if ns_alias == NULL_NAMESPACE:\n tail = ns_key\n else:\n tail = '%s.%s' % (ns_alias, ns_key)\n\n return 'openid.' 
+ tail", "def getKeyPath(self, keyPath):\n parent = self\n parts = keyPath.split(\".\")\n for part in parts[:-1]:\n child = parent.get(part, None)\n if child is None:\n return None\n parent = child\n return parent.get(parts[-1], None)", "def kms_key_name(self) -> str:\n return pulumi.get(self, \"kms_key_name\")", "def kms_key_name(self) -> str:\n return pulumi.get(self, \"kms_key_name\")", "def kms_key_name(self) -> str:\n return pulumi.get(self, \"kms_key_name\")", "def GetKey(self, obj, keyName):\n\n key = (self._configKey is None and [\"Persistence_Options\"] or [self._configKey])[0]\n\n key += CONFIG_PATH_SEPARATOR + obj.GetKind()\n key += CONFIG_PATH_SEPARATOR + obj.GetName()\n key += CONFIG_PATH_SEPARATOR + keyName\n\n return key", "def _filename(self, key):\n return os.path.join(self.root, key[:2], key)", "def get_env_key(obj, key=None):\n return str.join('_', [obj.__module__.replace('.','_').upper(),\n key.upper()])", "def get_key_recursive(lang_map, lang_code, key_name, default=None):\n key_val = lang_map.get(lang_code, {}).get(key_name, sentinel)\n\n if key_val is not sentinel:\n return key_val\n\n parts = lang_code.split('_')\n parts.pop()\n if not parts:\n return default\n\n _lang_code = '_'.join(parts)\n return get_key_recursive(lang_map, _lang_code, key_name, default)", "def sub_key(dirname):\n return SUB_PREFIX + dirname", "def key_pair_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"key_pair_name\")", "def key_name(self):\n return self._key_name", "def _key_name(self, key):\n if type(key) == type(\"\"):\n return str(curses.keyname(ord(key)).decode(\"utf-8\"))\n return False", "def fname(key):\n return key.rsplit(\"/\", 1)[-1]", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")" ]
[ "0.61867285", "0.6151179", "0.59994215", "0.59087443", "0.59087443", "0.58050305", "0.57887447", "0.57743907", "0.57119745", "0.5711618", "0.56672543", "0.5649241", "0.5592001", "0.55778885", "0.55366206", "0.55205065", "0.55190045", "0.55060714", "0.55060714", "0.55060714", "0.54835165", "0.5469332", "0.5467664", "0.546364", "0.54382837", "0.53961414", "0.5376713", "0.5361389", "0.5351113", "0.5327895" ]
0.77827585
0
This function gets the total occurrences of words and syllables in the original Unicode Garshana corpus. To do this, it opens a .csv file with UTF-16 encoding and splits each line on commas, expecting the Sumerian text to be in the 8th column. It filters annotations from each line and tracks the occurrence of each word and syllable. All combinations of unigrams, bigrams, and trigrams are treated as individual syllables.
def get_counts(data): word_count = {} syll_count = {} infile = data.corpus try: open_file = codecs.open(infile, 'r', encoding='utf-16') for line in open_file: line = line.lower() # Remove tablet indexing info and line numbers. Grab only text data line = line.split(',') text = clean_line(line[7]) # Update the occurrences of the words in the line for word in text.split(): count = word_count.setdefault(word, 0) word_count[word] = count + 1 # Track occurrences of syllables update_syllable_count(word, syll_count) open_file.close() except IOError: print("Cannot open: " + infile) return (word_count, syll_count)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_word_counts(filename):\n raw_rows = csv_rows(filename)\n word_counts = defaultdict(lambda: 0)\n\n for line_number, raw_row in enumerate(raw_rows, 2):\n count = int(raw_row[\"count\"])\n ipa = raw_row[\"IPA\"]\n if '*' in ipa:\n continue\n\n # Fixes random badness.. hopefully doesn't hide anything?\n mod_ipa = ipa.replace('(', '').replace(')', '')\n\n # Work around a passage with an error in it:\n gloss = raw_row[\"Gloss\"] or raw_row[\"Text\"]\n\n category = raw_row[\"Category\"]\n\n skipword_characters = {'?'}\n try:\n for i, g in izip(mod_ipa.split('/'), gloss.split('/')):\n word = make_word(i, g, category)\n word_counts[word] += count\n except WordParseError as e:\n print (u\"Error on line %d: %s [%s || %s]\" %\n (line_number, repr(e), ipa, gloss)).encode('utf-8')\n except IndexError as e:\n unknown_index = e.args[0]\n if unknown_index in skipword_characters:\n print (u\"Bad char on line %d: %s [%s || %s]\" %\n (line_number, repr(e), ipa, gloss)).encode('utf-8')\n else:\n print \"FATAL ERROR ON LINE %d\" % line_number\n raise\n except:\n print \"FATAL ERROR ON LINE %d\" % line_number\n raise\n return word_counts", "def total_syllables(target_text):\n\n splited_text = target_text.split()\n count = 0\n for word in splited_text:\n count = count + word_syllables(word)\n return count", "def get_analyze_per_file(self):\n \"\"\"Exclude tags, exclude binary (img), count words without non literal characters and digits\"\"\"\n filename = f'{self.path}/{self.filename}'\n file = open(filename, 'r', encoding='utf-8')\n df_tmp = pd.DataFrame(columns=['word', 'cnt', 'word_low'])\n w_cnt = 0\n word_counter = {}\n data = file.read()\n head, sep, tail = data.partition('<binary')\n head = re.sub('\\\\s\\\\s*', ' ', (re.sub('\\\\W|\\\\d', ' ', re.sub('<.*?>', '', head))))\n word_list = head.split()\n for word in word_list:\n\n if word not in word_counter:\n word_counter[word] = 1\n else:\n word_counter[word] = word_counter[word] + 1\n w_cnt += 1\n\n for word, occurance in word_counter.items():\n df_tmp = df_tmp.append({'word': '{:15}'.format(word), 'cnt': '{:3}'.format(occurance),\n 'word_low': '{:15}'.format(word).lower()}, ignore_index=True)\n df_tmp = df_tmp.sort_values(by='word_low')\n df_tmp.loc[(df_tmp.word != df_tmp.word_low), 'word'] = df_tmp.cnt\n df_tmp.loc[(df_tmp.word == df_tmp.cnt), 'cnt'] = 0\n df_tmp.loc[(df_tmp.word == df_tmp.word_low), 'word'] = 0\n df_tmp['word'] = df_tmp.word.astype(int)\n df_tmp['cnt'] = df_tmp.cnt.astype(int)\n df_tmp = df_tmp.groupby(['word_low'])['cnt', 'word'].sum().reset_index()\n conn = sqlite3.connect('for_python_ht.db')\n try:\n try:\n sqlite_for_ht.CreateTableSingle.delete_table(f_3, self.filename)\n print(datetime.now(), '-', self.filename, 'Table deleted at the start point')\n except Exception:\n print(datetime.now(), '-', 'Something went wrong')\n traceback.print_exc()\n df_tmp.to_sql(name=self.filename, con=conn, index=False)\n print(datetime.now(), '-', self.filename, 'Table created and filled with data')\n except Exception:\n print(datetime.now(), '-', 'file with name {} already exists'.format(self.filename))\n traceback.print_exc()\n print(datetime.now(), '-', 'word analyse for', self.filename, 'done')\n sqlite_for_ht.HandleTemp.update_table(f_2, 'status', 'Done', self.filename)\n return None", "def process():\n words = read_csv('american-words.80', header=None)\n def get_value(word):\n \"\"\"\n A sub-process run on each word. 
It gets the value of each letter, and add up the values for the whole word.\n \"\"\"\n letters = 'abcdefghijklmnopqrstuvwxyz'\n sum = 0\n for letter in word:\n letter_value = letters.find(letter)\n if letter_value == -1:\n letter_value = 0\n sum += letter_value\n return sum\n words['values'] = words[0].apply(get_value)\n # get those words whose values are 100\n words = words[words['values'] == 100]\n # get the length of these words and sort ascending\n words['length'] = words[0].apply(len)\n words = words.sort(columns='length')\n return words[0].values", "def analyze_data(df, sentiment_col, tweet_col, path):\n\n # create empty dictionaries to store all encountered words and their frequencies\n all_dict = {}\n pos_dict = {}\n neg_dict = {}\n neu_dict = {}\n # initialize counters to counter total number of tweets based on their emotion\n pos_count = 0\n neg_count = 0\n neu_count = 0\n\n # iterate through each row of the df\n for index, row in df.iterrows():\n if row[sentiment_col] == \"positive\":\n pos_count = iterate_words(\n pos_count, row[tweet_col], all_dict, pos_dict)\n\n if row[sentiment_col] == \"negative\":\n neg_count = iterate_words(\n neg_count, row[tweet_col], all_dict, neg_dict)\n\n if row[sentiment_col] == \"neutral\":\n neu_count = iterate_words(\n neu_count, row[tweet_col], all_dict, neu_dict)\n\n # visualize statistics\n visualize_stats(all_dict, 'all_plot.png', 'all_cloud.png',\n 'Word frequency in all tweets', path)\n visualize_stats(pos_dict, 'pos_plot.png', 'pos_cloud.png',\n 'Word frequency in positive tweets', path)\n visualize_stats(neg_dict, 'neg_plot.png', 'neg_cloud.png',\n 'Word frequency in negative tweets', path)\n visualize_stats(neu_dict, 'neu_plot.png', 'neu_cloud.png',\n 'Word frequency in neutral tweets', path)\n\n # make plot for emotion frequency\n emotions = ('Positive', 'Negative', 'Neutral')\n freq = [pos_count, neg_count, neu_count]\n sns.set_style(\"darkgrid\")\n ax = plt.figure().gca()\n ax.xaxis.grid(False)\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n plt.bar(range(len(emotions)), freq, align='center',\n color=['forestgreen', 'firebrick', 'goldenrod'])\n plt.xticks(range(len(emotions)), emotions)\n plt.title('Tweet frequency based on emotion')\n plt.savefig(path + 'emotion_plot.png')\n plt.close()\n\n # make pie for emotion frequency\n sizes = [pos_count / len(df.index), neg_count /\n len(df.index), neu_count / len(df.index)]\n colors = ['forestgreen', 'firebrick', 'goldenrod']\n plt.pie(sizes, labels=emotions, colors=colors,\n autopct='%1.1f%%', startangle=140)\n plt.title('Tweet frequency based on emotion')\n plt.axis('equal')\n plt.savefig(path + 'emotion_pie.png')\n plt.close()", "def main ():\n fio = FileIo(\"../input2.txt\")\n text = fio.getInput()\n p = re.compile(r'#?\\d[\\s\\.]?[\\s]?')\n out = filter(None, p.split(text))\n #print out[2]\n #print len(out)\n wc = 0\n\n for s in out:\n text = nltk.word_tokenize(s)\n wc += wordCount( text )\n print wc", "def extract_syllable_features_from_txt():\n input_files = sys.argv[1]\n csv_name = sys.argv[2]\n syllable_stats = pd.DataFrame(columns=SYLLABLE_COLUMNS)\n re_word = re.compile(r'[\\w-]+')\n i = 0\n for filename in os.listdir(input_files):\n if filename != '.DS_Store':\n print(filename, i)\n syllable_count = 0\n for line in open(input_files+filename):\n for word in re_word.findall(line):\n syllable_count += estimate(word)\n syllable_stats = syllable_stats.append({\n TRANSCRIPT_ID: filename[:-4],\n MEMORY_SYLLABLE_COUNT: syllable_count,\n }, ignore_index=True)\n i += 1\n 
syllable_stats = syllable_stats.set_index(TRANSCRIPT_ID)\n syllable_stats.to_csv(csv_name+'.csv')", "def count_words(filename):", "def analyze_embeddings(emb):\n dic = {\"Hi\": 0, \"En\": 1, \"Ot\": 2}\n count = [0, 0, 0, 0]\n count_zero = [0, 0, 0, 0]\n for i, j in zip(emb, corpus_trans):\n for k, l in zip(i, j):\n count[dic[l[1]]] += 1\n if sum(k) == 0:\n count_zero[dic[l[1]]] += 1\n count[-1] = sum(count)\n count_zero[-1] - sum(count_zero)\n print(\"hi, en, ot, total\")\n print(\"count: \", count)\n print(\"zero count: \", count_zero)", "def updateWordCounts():\n emaildata = loadEmailData()\n englishwords = importDictionary()\n countAllWords(emaildata, englishwords)", "def summarize_corpus():\n\t\n\t# get metadata\n\t#get_metadata.from_TEIP5(wdir, corpus_inpath, \"metadata\", md_mode)\n\t\n\t# visualize some metadata\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"author-continent\")\n\tvisualize_metadata.describe_corpus(wdir, md_csv, \"author-country\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"language\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"subgenre_hist\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"subgenre_x\")\n\tvisualize_metadata.plot_pie(wdir, md_csv, \"subgenre\")\n\n\tvisualize_metadata.describe_corpus(wdir, md_csv, \"subgenre\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"gender\")\n\t\n\t# make some counts\n\tmd_table = pd.DataFrame.from_csv(os.path.join(wdir, md_csv), header=0)\n\tnum_texts = len(md_table)\n\t#num_language = len(md_table.groupby([\"language\"]))\n\t#num_continent = len(md_table.groupby([\"author-continent\"]))\n\t#num_countries = len(md_table.groupby([\"author-country\"]))\n\t#num_authors = len(md_table.groupby([\"author-name\"]))\n\tnum_authors = len(md_table.groupby([\"author-name\"]))\n\tnum_subgenre = len(md_table.groupby([\"subgenre\"]))\n\t#num_subgenre_x = len(md_table.groupby([\"subgenre_x\"]))\n\t#fr_subgenre_hist = md_table.groupby([\"subgenre_hist\"]).count()\n\t#num_historical = fr_subgenre_hist[\"idno\"][\"historical\"]\n\t#num_not_historical = fr_subgenre_hist[\"idno\"][\"not_historical\"]\n\t\n\t\n\td = {\"texts\":[num_texts], \n\t#\"languages\":[num_language],\n\t#\"continents\":[num_continent],\n\t#\"countries\":[num_countries],\n\t\"authors\":[num_authors],\n\t#\"subgenre_x\":[num_subgenre_x],\n\t\"subgenre\":[num_subgenre]}\n\t#\"num_historical\":[num_historical],\n\t#\"num_not_historical\":[num_not_historical]}\n\t\n\t\n\t\n\tcount_fr = pd.DataFrame(d)\n\tcount_fr.to_csv(os.path.join(wdir, \"corpus-description.csv\"), sep=\",\", header=True)\n\tprint(\"Done: summarize corpus\")", "def _count_vocab(self,raw_documents, fixed_vocab=False):\n if fixed_vocab:\n vocabulary = self.vocabulary_\n else:\n # Add a new value when a new vocabulary item is seen\n vocabulary = defaultdict()\n vocabulary.default_factory = vocabulary.__len__\n\n analyze = super().build_analyzer()\n \n j_indices = []\n indptr = []\n\n values = array.array(str('f'))\n indptr.append(0)\n for doc in raw_documents:\n #doc = tupla[0]\n feature_counter = {}\n #texttlist = doc.split(sep=\" \")\n for feature in analyze(doc):#texttlist:\n try:\n \n # Ignore out-of-vocabulary items for fixed_vocab=True\n feature_idx = vocabulary[feature]\n #print(feature_idx)\n #fti_feature = calc_fti(feature,raw_documents)\n \n if feature_idx not in feature_counter:\n feature_counter[feature_idx] = 1\n else:\n feature_counter[feature_idx] += 1\n #print(feature_counter[feature_idx])\n except KeyError:\n # Ignore out-of-vocabulary items 
for fixed_vocab=True\n continue\n\n\n j_indices.extend(feature_counter.keys())\n values.extend(feature_counter.values())\n indptr.append(len(j_indices))\n\n if not fixed_vocab:\n # disable defaultdict behaviour\n vocabulary = dict(vocabulary)\n if not vocabulary:\n raise ValueError(\"empty vocabulary; perhaps the documents only\"\n \" contain stop words\")\n\n if indptr[-1] > np.iinfo(np.int32).max: # = 2**31 - 1\n if _IS_32BIT:\n raise ValueError(('sparse CSR array has {} non-zero '\n 'elements and requires 64 bit indexing, '\n 'which is unsupported with 32 bit Python.')\n .format(indptr[-1]))\n indices_dtype = np.int64\n\n else:\n indices_dtype = np.int32\n \n j_indices = np.asarray(j_indices, dtype=indices_dtype)\n indptr = np.asarray(indptr, dtype=indices_dtype)\n \n #print (vocabulary)\n X = sp.csr_matrix((values, j_indices, indptr),\n shape=(len(indptr) - 1, len(vocabulary)),\n dtype=np.float32)\n X.sort_indices() \n \n self.vocabulary_calculated = vocabulary\n\n return vocabulary, X", "def analyzeFile(filename): \n fileData = open(filename, encoding=\"utf-8\") # open the file\n \n counts = {}\n\n for line in fileData:\t\t # iterates over every line of the file\n words = line.split() # turns each line into a list\n for word in words: #iterates over the words in each line list\n word = word.lower().strip(string.whitespace+string.punctuation)\n if len(word) > 0: #make sure word is longer than 0 before adding it to the dictionary\n counts[word] = counts.get(word, 0) + 1 #look up if the dictionary has that word and if not then it'll add that word with the value 0 associated with it and then add one to that, if it has seen it it'll add 1 to the value stored in the counts dictionary\n #when it gets here for the first line it goes back up to the top and repeats for the 2nd line\n mostCommonWord = [word]\n leastCommonWord = [word]\n shortestWord = [word]\n longestWord = [word]\n \n for item in counts:\n if counts[mostCommonWord[0]] < counts[item]:\n mostCommonWord = [item]\n elif counts[mostCommonWord[0]] == counts[item]:\n mostCommonWord.append(item)\n if counts[leastCommonWord[0]] > counts[item]:\n leastCommonWord = [item]\n elif counts[leastCommonWord[0]] == counts[item]:\n leastCommonWord.append(item)\n if len(shortestWord[0]) > len(item):\n shortestWord = [item] \n elif len((shortestWord[0])) == len(item):\n shortestWord.append(item)\n if len(longestWord[0]) < len(item):\n longestWord = [item]\n elif len(longestWord[0]) == len(item):\n longestWord.append(item)\n \n return (mostCommonWord, leastCommonWord, shortestWord, longestWord)", "def extractWords(self, inputDataset):\n reviewFile = open(inputDataset, \"r\", encoding=\"utf-8-sig\")\n for record in reviewFile:\n record = record.strip().split(\"\\t\") # tab-delimited .txt file\n self.addUnigrams(int(record[0]), record[1])\n reviewFile.close()", "def test_counts(self):\n lines, words, chars = analyze_text(self.filename)\n self.assertEqual(lines, 4)\n self.assertEqual(words, 8)\n self.assertEqual(chars, 36)", "def count_ngrams(self, corpus):\n \n self.unigramcounts = {} # might want to use defaultdict or Counter instead\n self.bigramcounts = {} \n self.trigramcounts = {} \n\n self.total = 2\n ##Your code here\n\n for sentence in corpus:\n temp_1 = get_ngrams(sentence,1)\n temp_2 = get_ngrams(sentence,2)\n temp_3 = get_ngrams(sentence,3)\n for i in range(len(temp_1)):\n if temp_1[i] in self.unigramcounts:\n self.unigramcounts[temp_1[i]] += 1\n else:\n self.unigramcounts[temp_1[i]] = 1\n self.total += 1\n\n for i in range(len(temp_2)):\n 
if temp_2[i] in self.bigramcounts:\n self.bigramcounts[temp_2[i]] += 1\n else:\n self.bigramcounts[temp_2[i]] = 1\n\n for i in range(len(temp_3)):\n if temp_3[i] in self.trigramcounts:\n self.trigramcounts[temp_3[i]] += 1\n else:\n self.trigramcounts[temp_3[i]] = 1\n return", "def word_frequency():\n\n song = open(\"data/yellow_submarine.txt\")\n d = dict()\n for line in song:\n line = line.strip()\n line = line.lower()\n punctuations = \"\"\"!()-[]{};:'\"\\,<>./?@#$%^&*_~\"\"\" # remove punctuation https://www.programiz.com/python-programming/examples/remove-punctuation\n no_punct = \"\" # remove punctuation\n for char in line: # remove punctuation\n if char not in punctuations: # remove punctuation\n no_punct = no_punct + char # remove punctuation\n words = line.split(\" \")\n for word in words:\n d[word] = d.get(word, 0) + 1\n return d", "def _count(self):\n words = [word.lower() for word in self.corpus.words()]\n bigrams_words = bigrams(words)\n for bigram in bigrams_words:\n self._bigrams[bigram] += 1", "def cleanCsv(): \n\n count_neutral = 0\n count_sad = 0\n count_angry = 0\n count_happy = 0\n\n count_session_neutral = 0 \n\n for column_values in raw_data:\n\n if significant_data.fieldnames is None:\n dh = dict((h, h) for h in raw_data.fieldnames)\n significant_data.fieldnames = raw_data.fieldnames\n significant_data.writerow(dh)\n\n if column_values['AOI[Sad_Right]Hit'] == '1':\n significant_data.writerow(column_values)\n count_sad = count_sad + 1\n\n if column_values['AOI[Neutral_Left]Hit'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Neutral_Left]Hit_0'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Neutral_Right]Hit'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Angry_Left]Hit'] == '1':\n significant_data.writerow(column_values)\n count_angry = count_angry + 1\n\n if column_values['AOI[Neutral_Right]Hit_0'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Happy_Right]Hit'] == '1':\n significant_data.writerow(column_values)\n count_happy = count_happy + 1\n\n if column_values['AOI[Neutral_Left]Hit_1'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Happy_Left]Hit'] == '1':\n significant_data.writerow(column_values)\n count_happy = count_happy + 1\n\n if column_values['AOI[Neutral_Right]Hit_1'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Sad_Left]Hit'] == '1':\n significant_data.writerow(column_values)\n count_sad = count_sad + 1\n\n if column_values['AOI[Neutral_Right]Hit_2'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Angry_Right]Hit'] == '1':\n significant_data.writerow(column_values)\n count_angry = count_angry + 1\n\n if column_values['AOI[Neutral_Left]Hit_2'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n return {\n 'count_neutral': count_neutral,\n 'count_sad': count_sad,\n 'count_angry': count_angry,\n 'count_happy': count_happy,\n }", "def count_ngrams(self, corpus):\n \n self.unigramcounts = defaultdict(int)\n self.bigramcounts = defaultdict(int)\n self.trigramcounts = defaultdict(int)\n\n self.sentence_counts = 0\n self.word_count = 0\n\n for line in corpus:\n 
sequence = line\n self.sentence_counts +=1\n\n unigrams = get_ngrams(sequence, n=1)\n for gram in unigrams:\n self.word_count += 1\n self.unigramcounts[gram] +=1\n\n bigrams = get_ngrams(sequence, n=2)\n for gram in bigrams:\n self.bigramcounts[gram] +=1\n\n trigrams = get_ngrams(sequence, n=3)\n for gram in trigrams:\n self.trigramcounts[gram] +=1\n\n #self.unigramcounts[('START')] = self.sentence_counts *2\n self.bigramcounts[('START', 'START')] = self.sentence_counts\n\n #return self", "def count_syllables(words):\n\n\n count = 0\n\n for word in words:\n word_count = count_syllables_in_word(word)\n count = count + word_count\n return count", "def word_frequency( tokenized, dic ):\n print( 'computing word frequencies' )\n start = time.time()\n for i, text in enumerate( tokenized ):\n for token in text:\n if token not in dic:\n dic[ token ] = 1\n else:\n dic[ token ] += 1\n if i % 10000 == 0:\n sys.stdout.write( '\\rprocessed : {}/{} reviews in {}s'.format( i, NO_REVIEWS, time.time() - start ) )\n sys.stdout.write( '\\rprocessed : {}/{} reviews in {}s\\n'.format( i, NO_REVIEWS, time.time() - start ) )", "def count_same_words(cuisine_file, menu):\n\n cuisine_list = separate_words(cuisine_file)\n \n same_word_count = 0\n \n for i in cuisine_list:\n for j in menu:\n if i == j:\n same_word_count += 1\n \n return same_word_count", "def count_words(self, contents):\n wordCounts = {}\n for i in self.ngramCounts:\n if i == 0: # want the default to be the size of the corpus\n total = 0\n for line in contents:\n words = line.split(\" \")\n words = [ w.strip() for w in words if w] #remove nulls\n for word in words:\n if word:\n total += 1\n wordCounts[i] = defaultdict(lambda: total)\n continue\n else:\n counts = defaultdict(lambda: 0)\n for line in contents:\n words = line.split(\" \")\n words = [ w.strip() for w in words if w] #remove nulls\n for k, word in enumerate(words): \n if k < (i-1) or not word:\n continue\n key = \"\"\n for j in range(k-i+1, k+1):\n key += words[j] + \" \"\n counts[key.strip()] += 1\n wordCounts[i] = counts\n return wordCounts", "def __parse_corpus(self, corpus):\n corpus = self.__handle_corpus_unkwon_words(corpus)\n start_token = ' '.join([NGramModel.START_SENTENCE_TOKEN]*(self.__n-1))\n word_list = corpus.replace(NGramModel.START_SENTENCE_TOKEN, start_token).split()\n \n for n in range(1, self.__n+1): \n self.__ngram_counts[n] = {}\n for ngram, count in Counter(self.__generate_n_grams(word_list, n)).items():\n self.__ngram_counts[n][' '.join(ngram)] = count", "def train(self, corpus):\n for sentence in corpus.corpus:\n for datum in sentence.data: \n self.unigramCounts[datum.word] += 1\n self.totalCount += 1", "def annotate_tsv_freq(in_tsv_gz,annotation_tsv):\n sys.stderr.write(\"Reading TSV file ...\\n\")\n nicollo = pd.read_csv(BOLLI, sep=\"\\t\")\n nicollo = nicollo.iloc[:,[1,2,4,5,23]]\n nicollo_counts = nicollo.groupby(['CHR','START'])['MT'].count()\n nol_var = nicollo.drop(['WT','MT'], axis = 1) \n nol_var = nol_var.set_index(['CHR', 'START'])\n\n #nicollo_counts = nicollo.groupby([\"CHR\",\"START\",\"WT\",\"MT\"]).size().reset_index(name=\"count\")\n #nicollo_counts = nicollo_counts[[\"CHR\", \"START\",\"count\"]].set_index(['CHR','START'])\n\n mmrf = pd.read_csv('/ifs/res/leukgen/home/yellapav/MMRF/MMRF_CoMMpass_IA9_All_Canonical_Variants.txt', sep=\"\\t\")\n mmrf=mmrf.iloc[:,[0,1,2,4,5,19,23]]\n mmrf=mmrf.drop_duplicates()\n\n mmrfM=mmrf.groupby(['CHROM','POS'])['GEN[1].AR'].median()\n mmrfC=mmrf.groupby(['CHROM','POS'])['GEN[1].AR'].count()\n 
mmrfQ25=mmrf.groupby(['CHROM','POS'])['GEN[1].AR'].quantile(q=0.25)\n mmrfQ75=mmrf.groupby(['CHROM','POS'])['GEN[1].AR'].quantile(q=0.75)\n \n\n #anno_tsv = pd.read_csv(annotation_tsv, comment='#',sep=\"\\t\")\n anno_tsv = pd.read_csv(annotation_tsv, comment='#',sep=\"\\t\", low_memory=False)\n #anno_tsv[anno_tsv['FILTER'] == \"PASS\"]\n counts_tsv=anno_tsv.groupby([\"CHR\",\"START\",\"REF\",\"ALT\"]).size().reset_index(name=\"count\")\n counts_tsv=counts_tsv[[\"CHR\", \"START\",\"count\"]].set_index(['CHR','START'])\n counts_median=anno_tsv.groupby(['CHR','START'])['TARGET_VAF'].median()\n\n\n\n inFile = gzip.open(in_tsv_gz,'r')\n \n sys.stderr.write(\"Annotating ...\\n\")\n for record in inFile:\n record=record.decode(\"utf-8\")\n record=record.rstrip()\n recArr=record.split(\"\\t\")\n \n cl = [] \n freq = [] \n medVAF = [] \n Q25 = [] \n Q75 = [] \n positions = [] \n normal = \"0\" \n normalVAF = \"0\" \n bolli_cl = [] \n bolli_freq = [] \n bolli_positions = [] \n bolli_anno = [] \n flag = 0\n bolli_flag = 0\n if record.startswith(\"#\"):\n continue\n\n if recArr[0] == \"ID_VARIANT\":\n cl = \"MMRF_Class\"\n freq = \"MMRF_Frequency\"\n medVAF = \"MMRF_VAF\"\n Q25 = \"MMRF_Q25\"\n Q75 = \"MMRF_Q75\"\n positions = \"MMRF_Positions\"\n normal = \"Normals_Frequency\"\n normalVAF = \"Normals_median_VAF\"\n bolli_cl = \"Bolli_Class\"\n bolli_freq = \"Bolli_Frequency\"\n bolli_positions = \"Bolli_Positions\"\n bolli_anno = \"Bolli_Annotation\"\n record = [ record, cl, freq, medVAF, Q25, Q75, positions, bolli_cl, bolli_freq, bolli_anno, bolli_positions, normal, normalVAF ]\n record = (\"\\t\".join(record))\n print(record)\n continue\n\n try:\n chrom = str(recArr[3])\n pos = int(recArr[4])\n start = int(recArr[4]) - 9\n end = int(recArr[4]) + 9\n if (chrom, pos) in mmrfC.index:\n cl = \"genomic_exact\"\n freq = str(mmrfC.loc[(chrom,pos)])\n medVAF = str(mmrfM.loc[(chrom,pos)])\n Q25 = str(mmrfQ25.loc[(chrom,pos)])\n Q75 = str(mmrfQ75.loc[(chrom,pos)])\n positions = str(pos)\n record = [ record, cl, freq, medVAF, Q25, Q75, positions ]\n record = (\"\\t\".join(record))\n flag = 1\n if flag == 0:\n mmrfCsub=mmrfC.loc[chrom]\n if not mmrfCsub[(mmrfCsub.index >= start) & (mmrfCsub.index <= end)].empty:\n for i in mmrfCsub[(mmrfCsub.index >= start) & (mmrfCsub.index <= end)].index.values:\n cl = \"genomic_close\"\n freq.append(str(mmrfC.loc[(chrom,i)]))\n medVAF.append(str(mmrfM.loc[(chrom,i)]))\n Q25.append(str(mmrfQ25.loc[(chrom,i)]))\n Q75.append(str(mmrfQ75.loc[(chrom,i)]))\n positions.append(str(i))\n freq = (\":\".join(freq))\n medVAF = (\":\".join(medVAF))\n Q25 = (\":\".join(Q25))\n Q75 = (\":\".join(Q75))\n positions = (\":\".join(positions))\n record = [ record, cl, freq, medVAF, Q25, Q75, positions ]\n record = (\"\\t\".join(record))\n else:\n cl = \"NA\"\n freq = \"NA\"\n medVAF = \"NA\"\n Q25 = \"NA\"\n Q75 = \"NA\"\n positions = \"NA\"\n record = [ record, cl, freq, medVAF, Q25, Q75, positions ]\n record = (\"\\t\".join(record))\n\n\n except:\n cl = \"NA\"\n freq = \"NA\"\n medVAF = \"NA\"\n Q25 = \"NA\"\n Q75 = \"NA\"\n positions = \"NA\"\n record = [ record, cl, freq, medVAF, Q25, Q75, positions ]\n record = (\"\\t\".join(record))\n\n\n\n try:\n chrom = str(recArr[3])\n pos = int(recArr[4])\n start = int(recArr[4]) - 9\n end = int(recArr[4]) + 9\n if (chrom, pos) in nicollo_counts.index:\n bolli_cl = \"genomic_exact\"\n bolli_freq = str(nicollo_counts.loc[(chrom,pos)]) \n bolli_positions = str(pos)\n bolli_anno = str(nol_var.loc[chrom, pos]['Variant_class'].values[0])\n record = [ 
record, bolli_cl, bolli_freq, bolli_anno, bolli_positions ]\n record = (\"\\t\".join(record))\n bolli_flag = 1\n\n\n if bolli_flag == 0: \n nicollo_counts_sub=nicollo_counts.loc[chrom]\n if not nicollo_counts_sub[(nicollo_counts_sub.index >= start) & (nicollo_counts_sub.index <= end)].empty:\n for i in nicollo_counts_sub[(nicollo_counts_sub.index >= start) & (nicollo_counts_sub.index <= end)].index.values:\n #if not nicollo_counts_sub.ix[start:end].empty:\n # for i in nicollo_counts_sub.ix[start:end].index.values:\n #print(\"XXXXXXX\",i, nicollo_counts_sub.loc[(chrom,i)], start, end)\n bolli_cl = \"genomic_close\"\n bolli_freq.append(str(nicollo_counts.loc[(chrom,i)]))\n bolli_anno.append(str(nol_var.loc[(chrom,i)]['Variant_class'].values[0]))\n bolli_positions.append(str(i))\n bolli_freq = (\":\".join(bolli_freq))\n bolli_positions = (\":\".join(bolli_positions))\n bolli_anno = (\":\".join(bolli_anno))\n record = [ record, bolli_cl, bolli_freq, bolli_anno, bolli_positions ]\n record = (\"\\t\".join(record))\n else:\n bolli_cl = \"NA\"\n bolli_freq = \"NA\"\n bolli_positions = \"NA\"\n bolli_anno = \"NA\"\n record = [ record, bolli_cl, bolli_freq, bolli_anno, bolli_positions ]\n record = (\"\\t\".join(record))\n\n\n except:\n bolli_cl = \"NA\"\n bolli_freq = \"NA\"\n bolli_anno = \"NA\"\n bolli_positions = \"NA\"\n record = [ record, bolli_cl, bolli_freq, bolli_anno, bolli_positions ]\n record = (\"\\t\".join(record))\n\n\n normal = \"0\"\n normalVAF = \"0\"\n try:\n chrom=str(recArr[3])\n pos=int(recArr[4])\n normal = counts_tsv.loc[(chrom,pos),\"count\"]\n normal = normal.ix[0]\n normal = str(normal)\n\n normalVAF = str(counts_median.loc[(chrom,pos)])\n\n record = [ record, normal, normalVAF ]\n record = (\"\\t\".join(record))\n print(record)\n\n except:\n normal = \"0\"\n normalVAF = \"0\"\n record = [ record, str(normal), str(normalVAF) ]\n record = (\"\\t\".join(record))\n print(record)", "def word_analogy(self):\n data = open(\"data/word_analogy_subset.en.ar.txt\").read().split('\\n')\n data = [x for x in data if len(x.split()) == 4]\n cnt = 0\n keys = list(self.embeddings_index.keys())\n vectors = np.array(list(self.embeddings_index.values()))\n norms = np.linalg.norm(vectors, axis=1)\n for i in data:\n i = self.preprocessor(i).split()\n try:\n v = self.embeddings_index[i[0]] - self.embeddings_index[i[1]] + self.embeddings_index[i[2]]\n except:\n continue\n unit = v / np.linalg.norm(v)\n dists = np.dot(vectors, unit) / norms\n best = np.argpartition(-dists, 10)[:10 + 1]\n best = best.take(np.argsort((-dists).take(best)))\n result = [(keys[sim], float(dists[sim]))\n for sim in best]\n sbv = result[:10]\n for j in sbv:\n if j[0] == i[3]:\n cnt += 1\n return cnt/ len(data)", "def _get_num_syllables(doc: Doc, min_syllables: int = 1):\n text = (word for word in doc if not word.is_punct and \"'\" not in word.text)\n syllables_per_word = tuple(syllapy.count(word.text) for word in text)\n return sum(c for c in syllables_per_word if c >= min_syllables)", "def count_words_and_dublicates(novel):" ]
[ "0.6335053", "0.57268775", "0.57179636", "0.5714977", "0.57052946", "0.5632471", "0.56277466", "0.56243765", "0.5612738", "0.5593558", "0.55885386", "0.5571546", "0.5560268", "0.55578226", "0.55549335", "0.55408674", "0.55253506", "0.5510573", "0.55039716", "0.5502862", "0.5502827", "0.5487121", "0.5435187", "0.5422129", "0.53818375", "0.5378401", "0.53673434", "0.5360341", "0.53580433", "0.53432876" ]
0.65279776
0
Returns the accounting period that is currently valid. An accounting_period is valid when the current date lies between its begin and end.
def get_current_valid_accounting_period():
    current_valid_accounting_period = None
    for accounting_period in AccountingPeriod.objects.all():
        if accounting_period.begin < date.today() and accounting_period.end > date.today():
            return accounting_period
    if not current_valid_accounting_period:
        raise AccountingPeriodNotFound()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getCurrentValidAccountingPeriod():\n currentValidAccountingPeriod = None\n for accountingPeriod in AccountingPeriod.objects.all():\n if accountingPeriod.begin < date.today() and accountingPeriod.end > date.today():\n return accountingPeriod\n if currentValidAccountingPeriod == None:\n raise NoFeasableAccountingPeriodFound()", "def get_current_period(self):\n if not self.next_billing:\n return None\n assert self.type == \"N\", _(\"Subscription must be normal to use this method\")\n start = self.next_billing - relativedelta(months=self.frequency)\n end = self.next_billing\n return start, end", "def current_period(self):\n return self._current_period", "def get_all_prior_accounting_periods(target_accounting_period):\n accounting_periods = []\n for accounting_period in AccountingPeriod.objects.all():\n if accounting_period.end < target_accounting_period.begin:\n accounting_periods.append(accounting_period)\n if accounting_periods == []:\n raise AccountingPeriodNotFound(\"Accounting Period does not exist\")\n return accounting_periods", "def getAllPriorAccountingPeriods(targetAccountingPeriod):\n currentValidAccountingPeriod = None\n accountingPeriods = []\n for accountingPeriod in AccountingPeriod.objects.all():\n if accountingPeriod.end < targetAccountingPeriod.begin:\n accountingPeriods.append(accountingPeriod)\n if accountingPeriods == []:\n raise NoPriorAccountingPeriodFound()\n return accountingPeriods", "def period(self):\n return self.__period", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")", "def __get_period(self):\n return self.__period", "def get_chart_period(self,req):\n now=int(DATE())\n period=INT(req.period) # allow for it having been a string\n if period>9999: # assume it is a month\n if period<(now//100): # a valid complete previous month\n prior=True# this is a previous month\n else:\n period=now//100 # default to current month\n prior=False\n start=period*100+1\n end=self.nextperiod(period)*100+1\n else: # assume it is a year\n if period and (period<(now//10000)): # a prior year\n prior=True# this is a previous year\n else:\n##\n# period=now//100 # default to current month\n# prior=False\n# start=period*100+1\n# end=self.nextperiod(period)*100+1\n##\n period=now//10000 # default to current year\n prior=False\n start=period*10000+101\n end=self.nextperiod(period)*10000+101\n return period,start,end,prior", "def planning_period(self):\n return self._planning_period", "def getPeriod(self):\n return StripePeriod(self.base.get(\"period\", []))", "def billing_period(self) -> Optional[str]:\n return pulumi.get(self, \"billing_period\")", "def get_interval(self):\n return self._period", "def get_period(self):\n # res\n if self._cacheExpiration <= YAPI.GetTickCount():\n if self.load(YAPI._yapiContext.GetCacheValidity()) != YAPI.SUCCESS:\n return YPwmOutput.PERIOD_INVALID\n res = self._period\n return res", "def 
get_period_guarantee_faithful_compliance(self):\n return ceiling(self.get_period_faithful_compliance, 3)", "def remaining_days_in_current_period(self):\n try:\n return self.count_days_from_now(self.current_period_ends_at)\n except AttributeError:\n return 0", "def get_period_range(self, period, start, end, inclusive_start=True, inclusive_end=True):\n if not isinstance(start, datetime.datetime):\n start = self.get_date_from_string(start, '%Y-%m-%d')\n if not isinstance(end, datetime.datetime):\n end = self.get_date_from_string(end, '%Y-%m-%d')\n\n if period == 'month':\n get_period = self.get_current_month_range\n get_next_period = self.get_next_month\n get_previous_period = self.get_previous_month\n if period == 'week':\n get_period = self.get_current_week_range\n get_next_period = self.get_next_week\n get_previous_period = self.get_previous_week\n\n #####################\n # inclusive_start means that the result set will include the whole period\n # containing the start date. Likewise for inclusive_end.\n #\n # If you are, say, reporting on a 'last completed month' or something,\n # but your report date (and end date) is mid-month or something, then setting 'inclusive_end'\n # to False will insure that the report ends with the month prior to the\n # end date.\n #\n # If you're doing projections starting with the month following the one\n # you're in, setting inclusive_start to False will insure that the first\n # period in the range is the one *after* the period you're in now.\n #######################\n if not inclusive_start:\n start = get_next_period(start)[0]\n if not inclusive_end:\n end = get_previous_period(end)[1]\n\n returnvals = []\n\n\n firstper = get_period(start)\n returnvals.append(firstper)\n per = firstper\n while per[1] < end:\n # goes as long as the *end* of the period is < our end date.\n # the intent is that if end is 2010-10-04, the last period will be\n # (2010-10-01, 2010-10-31)\n per = get_next_period(per[1])\n returnvals.append(per)\n\n return returnvals", "def checkpoint_period_get(self):\n raise Exception(\"TODO\")", "def expected_last_period_end(self):\n return self._expected_last_period_end", "def valid_period(request):\n return request.param", "def _get_period(self, cr, uid, context={}):\n\n account_period_obj = self.pool.get('account.period')\n ids = account_period_obj.find(\n cr, uid, time.strftime('%Y-%m-%d'), context=context)\n period_id = ids[0]\n return period_id", "def real_period(self):\n return max(\n self.period * self.PERIOD_MARGIN_FACTOR -\n (self.max_lag if self.max_lag else self.lag * self.LAG_MARGIN_FACTOR),\n 0.0)", "def getBeginEnd(self):\n if (self.dr_type == choices.DATE_RANGE_TYPE_FIXED):\n return self.begin, self.end\n\n elif (self.dr_type == choices.DATE_RANGE_TYPE_VARIABLE):\n end = datetime.now()\n\n if (self.unit == choices.TIME_UNIT_DAY):\n begin = end - relativedelta(days=self.quantity)\n\n elif (self.unit == choices.TIME_UNIT_WEEK):\n begin = end - relativedelta(weeks=self.quantity)\n\n elif (self.unit == choices.TIME_UNIT_MONTH):\n begin = end - relativedelta(months=self.quantity)\n\n elif (self.unit == choices.TIME_UNIT_YEAR):\n begin = end - relativedelta(years=self.quantity)\n\n else:\n # This case should not happen\n raise Exception(\"A DateRange object's 'unit' must be a numeric\"\n \" value in: {units}.\".format(units=\", \".join([\n \"{const} ({name})\".format(const=unit, name=unit_name)\n for unit, unit_name in choices.TIME_UNIT\n if unit is not None]))\n )\n\n return begin, end\n\n else:\n # This case should not happen\n 
raise Exception(\"A DateRange object's 'dr_type' must be one of:\"\n \" {const_fixed} (fixed range) or {const_dynamic}\"\n \" (dynamic range).\".format(\n const_fixed=choices.DATE_RANGE_TYPE_FIXED,\n const_dynamic=choices.DATE_RANGE_TYPE_VARIABLE\n ))" ]
[ "0.82862264", "0.6872075", "0.65232825", "0.6370569", "0.6290718", "0.60823673", "0.6040515", "0.6040515", "0.6040515", "0.6040515", "0.6040515", "0.6040515", "0.6040515", "0.6040515", "0.6018042", "0.6012301", "0.5940794", "0.59097856", "0.5859212", "0.5821298", "0.5755713", "0.57361776", "0.57259804", "0.5627092", "0.5623561", "0.56206757", "0.5603892", "0.55797243", "0.557284", "0.551847" ]
0.8423247
0
Transform mp3 file into wav format calling bash and using mpg123 or ffmpeg.
def mp3_to_wav(mp3_file, wav_file, encoder='mpg123'):
    if encoder == 'mpg123':
        bash_command = ['mpg123', '-w', wav_file, '--mono', mp3_file]
    else:
        bash_command = ['ffmpeg', '-i', mp3_file, wav_file]
    subprocess.run(bash_command)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mp3_to_wav(show_progress=True):\n\n # Define a devnull var to supress subprocess output\n devnull = open(os.devnull, 'w')\n\n # Get a list of the filepath for each of the mp3 files in each subdirectory of data/fma_small\n file_list = glob.glob('./../data/fma_small/*/*.mp3')\n\n # Get the number of files N and initialize a counter\n N = len(file_list)\n counter = 0\n\n # For each file/filepath, convert that file to wav format and save it to data/wavs/*/*.wav (so as a wave file)\n for filepath in file_list:\n\n # Every 100 file conversions, print a progress update\n if counter % 50 == 49 and show_progress:\n progress = str(round(100 * counter / N, 2))\n print('File conversion ' + progress + '% complete.')\n\n # Get the file name from the path and define a new path for the wav file\n file_name = filepath[24:-4]\n new_path = './../data/wavs/' + file_name + '.wav'\n\n # Call the subprocess using ffmpeg to convert the file to wav format (and supress all the output)\n subprocess.call(['ffmpeg', '-i', filepath, new_path], stdout=devnull)\n\n # Increment the counter\n counter += 1", "def convert_to_wav(mp3_filename):\n\n wav_filename = mp3_filename[:-4] + \".wav\"\n complete_mp3FileName = os.path.join(MP3_FOLDER, mp3_filename)\n complete_wavFileName = os.path.join(WAV_FOLDER, wav_filename)\n\n mp3_file = AudioSegment.from_mp3(complete_mp3FileName)\n mp3_file.export(complete_wavFileName, format=\"wav\")\n\n print(f\"The mp3 file {complete_mp3FileName} was successfully converted to \" \\\n + f\"the wav file {complete_wavFileName}.\")", "def convert_to_mp3(self,path, filename):\n\n codec = \"libmp3lame\"\n mp3_filename = filename + \".mp3\"\n\n command = [self.FFMPEG_BIN,\n \"-n\",\n \"-i\", path,\n \"-acodec\", codec,\n \"-ab\", \"128k\",\n mp3_filename\n ]\n\n return command", "def convert_to_wav(fin, fout):\n temp = subprocess.run([\"ffmpeg\",\n \"-i\", \n fin, \n fout], \n stdout=subprocess.PIPE, \n stderr=subprocess.PIPE)", "def extract_audio_from(file, out_dir=''):\n output_filename = f'{os.path.join(out_dir, os.path.basename(file)[:-4])}.wav'\n os.system(f'ffmpeg -i {file} {output_filename}')\n return output_filename", "def analyze_mp3(mp3filespec):\n \n # Make a temporary working directory for storing the wav file\n # that soundstretch should analyze\n wavfilespec = tempfile.NamedTemporaryFile(suffix='.wav') \n \n # Use lame to make a wav representation of the mp3 file to be analyzed\n wav_command = 'sox %s %s' % (mp3filespec, wavfilespec.name)\n subprocess.call([wav_command], shell=True, stderr=open(os.devnull, 'w'))\n \n # Call soundstretch to analyze the wav file\n bpm_command = 'soundstretch %s -bpm' % wavfilespec.name\n p = subprocess.Popen([bpm_command], shell=True,stdout=subprocess.PIPE)\n output = p.communicate()[0]\n \n # Delete temporary working directory and its contents\n #shutil.rmtree(workingdir)\n\n bpm_suggestion = _get_bpm_from_soundstretch(output)\n\n return fit_bpm_in_window(bpm_suggestion)", "def all_wav_to_mp3(self):\n for each_file, artist in self.past_songs_db_data:\n self.convert_wav_to_mp3(each_file)", "def convert_to_mp3(filename: str, title: str, start: int=None, end: int=None) -> list:\n\t# setup args for ffmpeg\n\tfile_a = f\"{path_to_wrk_dir}{filename}.mp4\" # input file\n\tfile_b = f\"{path_to_wrk_dir}{title}.mp3\" # output file\n\tfiles_b = [] # this list need if file more than 30 mb\n\targs = [\n\t\t\"/usr/bin/ffmpeg\", # path to ffmpeg\n\t\t\"-i\", # flag for input file\n\t\tfile_a, # input file\n\t\t\"-acodec\", # setup codec\n\t\t\"libmp3lame\", # 
codec name\n\t\t]\n\n\t# now need setup timings for target encode\n\tif start is not None and start != 0:\n\t\targs = args + [\"-ss\", str(start)]\n\tif end is not None and end != 0:\n\t\targs = args + [\"-t\", str(end - start)]\n\n\t# and last part for args to ffmpeg\n\targs = args + [\n\t\t\"-metadata\", # setup metadata for file\n\t\tf\"title={title}\", # title\n\t\t\"-metadata\",\n\t\tf\"artist={title}\", # and artist\n\t\t\"-b:a\", # setup bitrate\n\t\t\"320k\", # setup max bitrate\n\t\tfile_b,\n\t\t]\n\tprint(f\"{args}\")\n\t# start subprocess for encoding\n\tpopen = subprocess.Popen(args)\n\tpopen.wait()\n\n\t# check size file. if he more than 30 mb, bot need split him to chunks.\n\tsize = getsize(file_b) / 1024 / 1024\n\tif size > 30 and ( start or end is None ):\n\t\t# setup args for split to chunks\n\t\targs = [\n\t\t\t\"ffprobe\",\n\t\t\t\"-show_entries\",\n\t\t\t\"format=duration\",\n\t\t\t\"-i\",\n\t\t\tfile_b,\n\t\t\t]\n\n\t\t# get duration video.\n\t\tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\t\tpopen.wait()\n\t\toutput = popen.stdout.read()\n\t\t# now we know how long this audio file\n\t\t# split to 10 min chunks\n\t\tdur = re.findall(r\"\\d{1,10}\", str(output))\n\t\t# get chunks count for loop\n\t\tcount_chunks = (int(dur[0]) // 600) + 1\n\t\tfor chunk_start_time in range(0, count_chunks):\n\t\t\t# setup args for split\n\t\t\t# big parts of args the same for encode\n\t\t\targs = [\n\t\t\t\t\"/usr/bin/ffmpeg\",\n\t\t\t\t\"-i\",\n\t\t\t\tfile_b,\n\t\t\t\t\"-ss\",\n\t\t\t\tf\"{chunk_start_time * 600}\", # when start chunk\n\t\t\t\t\"-t\",\n\t\t\t\t\"600\", # 10 mints duration\n\t\t\t\t\"-acodec\",\n\t\t\t\t\"copy\", # copy\n\t\t\t\t\"-b:a\",\n\t\t\t\t\"320k\",\n\t\t\t\tf\"{path_to_wrk_dir}{title}_{chunk_start_time}.mp3\", # now we have path to video with chunk number.\n\t\t\t]\n\t\t\ttry:\n\t\t\t\t# start process for cut chunk\n\t\t\t\tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\t\t\t\tpopen.wait()\n\t\t\t# handle except.\n\t\t\texcept Exception as e:\n\t\t\t\tprint(f\"Exception - {e}\")\n\t\t\tfiles_b.append(f\"{path_to_wrk_dir}{title}_{chunk_start_time}.mp3\") # append name of file in list\n\t\tremove(file_b)\n\ttry:\n\t\t# remove tmp file\n\t\tremove(file_a)\n\t# handle except\n\texcept FileNotFoundError:\n\t\tfiles = get_file_list(path_to_wrk_dir)\n\t\tfor i in files:\n\t\t\tif -1 != f\"{path_to_wrk_dir}{i}\".find(f\"{filename}\") and f\"{i}\".find(f\".mp3\") == -1:\n\t\t\t\ttry:\n\t\t\t\t\tremove(f\"{path_to_wrk_dir}{i}\")\n\t\t\t\texcept FileNotFoundError:\n\t\t\t\t\tprint(f\"can't remove file {path_to_wrk_dir}{i}\")\n\tif len(files_b) == 0:\n\t\treturn [file_b]\n\telse:\n\t\treturn files_b", "def extract_audio(file_name, audio_directory):\n basename = os.path.splitext(os.path.basename(file_name))[0]\n audio_file_name = audio_directory + '/' + basename + '.wav'\n subprocess.call(['ffmpeg', '-y', '-i', file_name, '-ac', '1', audio_file_name])\n return audio_file_name", "def mp3_to_wav(song_dir, snip_dir, bird_list_path='bird_list.txt'):\n if os.path.exists(snip_dir):\n shutil.rmtree(snip_dir)\n os.makedirs(snip_dir)\n with open(bird_list_path) as f:\n lines = f.readlines()\n bird_list = [line.rstrip('\\n') for line in lines]\n # Build the bird-labeled subdirectories in 'snip_dir'.\n _make_bird_dirs(snip_dir, birds_list)\n # Populate the subdirectory with recordings converted from .mp3 to .wav.\n for f in os.listdir(song_dir):\n bird = extract_bird_name(f)\n if bird in birds_list:\n index = birds_list.index(bird)\n wav_filename = 
os.path.splitext(f)[0].replace(' ', '_') + '.wav'\n orig = os.path.join(mp3_dir, f)\n new = os.path.join(snip_dir, str(index), wav_filename)\n # MP3-to-WAV conversion requires the ffmpeg package.\n call([\"ffmpeg\", \"-i\", orig, new])", "def process_sound_file(file_path):\n\n return to_mfcc(get_wav(file_path))", "def convert_mp3_to_ogg(self, filename: str):\n mp3_path = os.path.join(self.directory, filename)\n ogg_path = mp3_path.replace(self.extension_mp3, self.extension_ogg)\n if os.path.isfile(ogg_path):\n # already done\n return\n command = [FFMPEG_BIN, '-i', mp3_path, ogg_path]\n pipe = sp.Popen(command, shell=False, stdout=sp.PIPE)\n pipe.wait()", "def convert_to_wav (filename, name, origpath, wavpath, mono):\n print(\"Converting {0} to .wav...\".format(filename))\n if not re.match(r\".*_\\d+$\",name):\n # If filenames do include video titles\n name = name.rsplit('_',1)[0]\n\n channel, vid_num = name.rsplit('_', 1)\n channel = re.sub(r'[^A-Za-z1-9]', '', channel)\n newname = '_'.join([channel, vid_num])\n\n exportname = newname + \".wav\"\n filepath = path.join(origpath, filename)\n\n if not path.exists(wavpath):\n makedirs(wavpath)\n exportPath = path.join(wavpath, exportname)\n sound = AudioSegment.from_file(filepath,\"mp4\")\n if mono == True:\n sound = sound.set_channels(1)\n sound.export(exportPath, format=\"wav\")", "def to_voice(item):\r\n item.seek(0)\r\n item = AudioSegment.from_file(item)\r\n m = io.BytesIO()\r\n m.name = \"voice.ogg\"\r\n item.split_to_mono()\r\n dur = len(item) / 1000\r\n item.export(m, format=\"ogg\", bitrate=\"64k\", codec=\"libopus\")\r\n m.seek(0)\r\n return m, dur", "def play_audio():\n directory = os.fsencode(MINI_PATH)\n print(directory)\n adp= []\n # lst = os.listdir(directory)\n # lst.sort()\n for file in os.listdir(directory):\n filename = os.fsdecode(file)\n #print(file)\n\n if filename.endswith(\".mp3\"): \n adp.append(MINI_PATH+filename)\n #print(adp)\n adp.sort()\n print(\"ADP: \", adp)\n x = \"|\".join(adp)\n print( f'concat:{x}')\n subprocess.call(['ffmpeg', '-i', f'concat:{x}', '-acodec', 'copy', RESULT_PATH])\n \n for file in os.listdir(directory):\n filename = os.fsdecode(file)\n print(filename)\n if filename.endswith(\".mp3\"):\n os.remove(MINI_PATH+filename)", "def mp3ogg(fname, datas):\n oggname = \"%s.ogg\" % fname[:-4]\n logger.info(\"(mp3ogg) encode [%s]\" % fname)\n logger.debug(\"(mp3ogg) oggenc binary path %s\" % settings.OGGENC)\n logger.debug(\"(mp3ogg) mpg123 binary path %s\" % settings.MPG123)\n\n command = [settings.OGGENC,\n \"--artist\", datas['artist'],\n \"--title\", datas['title'],\n \"--album\", datas['album'],\n \"--genre\", datas['genre'],\n \"--date\", datas['date'],\n \"--tracknum\", datas['tracknumber'],\n \"-o\", oggname,\n \"-\"]\n\n try:\n mpg = subprocess.Popen([settings.MPG123,\n \"-w\",\n \"-\",\n fname],\n stdout=subprocess.PIPE)\n\n ogg = subprocess.Popen(command,\n stdin=mpg.stdout, # pylint: disable-msg=E1101\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n (stdout, stderr) = ogg.communicate()\n logger.debug(stdout)\n logger.error(stderr)\n result = oggname\n except:\n logger.error(\"(mp3ogg) subprocess failed on [%s]\" % fname)\n result = None\n\n if result:\n os.unlink(fname)\n\n return result", "def main():\n destination = Path(argv[1])\n source_files = destination.glob(\"**/*.wma\")\n for file in source_files:\n new_name = file.name.rsplit(\".\", maxsplit=1)[0] + \".flac\"\n dest = str(file.parent / new_name)\n cmd = list(map(str, [\"avconv\", \"-i\", file, dest]))\n if platform == 
\"win32\":\n print(\"Running on windows... on Unix I'd run the following command:\")\n print(cmd)\n else:\n that = Popen(cmd)\n that.wait()", "def convert_to_wav(txt_file, sph_path, target_dir):\n wav_dir = os.path.join(target_dir, 'wav/')\n txt_dir = os.path.join(target_dir, 'txt/')\n os.makedirs(wav_dir, exist_ok=True)\n os.makedirs(txt_dir, exist_ok=True)\n path_to_data = os.path.dirname(txt_file)\n\n def process(x):\n file_path = x[\"audio_file\"]\n text = x[\"transcription\"]\n start_time = x[\"start_time\"]\n duration = x[\"end_time\"] - start_time\n file_name = os.path.splitext(os.path.basename(file_path))[0]\n file_name = str(start_time) + \"_\" + str(duration) + file_name\n text = text.strip().upper()\n with open(os.path.join(txt_dir, file_name + '.txt'), 'w') as f:\n f.write(text)\n cmd = \"sox -v 0.6 -t wav {} -r {} -b 16 -c 1 -t wav {} trim {} {}\".format(\n os.path.join(path_to_data, file_path),\n args.sample_rate,\n os.path.join(wav_dir, file_name + \".wav\"),\n start_time,\n duration)\n subprocess.call([cmd], shell=True)\n print('Converting wav to wav for {}.'.format(txt_file))\n # generate processed data\n data = read_transcription_file(txt_file, sph_path)\n with ThreadPool(10) as pool:\n pool.map(process, data)", "def check_wav(song, source_folder, temp_folder, encoder='mpg123'):\n # Name of files\n song_name, extension = os.path.splitext(song)\n mp3_file = os.path.join(source_folder, song)\n if '.wav' != extension:\n wav_file = os.path.join(temp_folder, song_name + '.wav')\n try:\n if not os.path.isfile(wav_file):\n mp3_to_wav(\n mp3_file=mp3_file,\n wav_file=wav_file,\n encoder=encoder)\n else:\n pass\n except MemoryError:\n logger.error('MemoryError: %s MP3 couldn\\'t be transformed into WAV', song_name)\n else: # Already a wav file\n copyfile(mp3_file, os.path.join(temp_folder, song_name))", "def convert(\n album,\n):\n for track in list_dir(album):\n ext = splitext(track)[1]\n if ext != \".mp3\":\n new_track = track.replace(ext, \".mp3\")\n if not exists(new_track):\n track_non_mp3 = AudioSegment.from_file(track, format=ext[1:])\n print(f\"{track} -> {new_track}\")\n track_non_mp3.export(new_track, format=\"mp3\")\n os.remove(track)", "def play_audio(filename):\n os.system(AUDIOPLAYER + ' ' + filename)", "def mono(filename,wout=True):\n n, data, data_dB,sr,ch=inputwav(filename)\n if ch==2:\n print('Converting to mono...')\n L=data[:,0]\n R=data[:,1]\n n=len(data)\n data_m=np.zeros((n,1))\n data_m=L/2.0+R/2.0\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_mono.wav',data_m,sr,'PCM_16')\n print('Done!')\n return data_m\n else:\n print( \"Error: input is already mono stoooooooooopid!\")", "def wavplay(filename):\n\tif (os.path.isfile(filename) == False): # raise error if wrong input file\n\t\tprint(\"Input file does not exist. 
Make sure you computed the analysis/synthesis\")\n\telse:\n\t\tif sys.platform == \"linux\" or sys.platform == \"linux2\":\n\t\t # linux\n\t\t subprocess.call([\"aplay\", filename])\n\n\t\telif sys.platform == \"darwin\":\n\t\t\t# OS X\n\t\t\tsubprocess.call([\"afplay\", filename])\n\t\telse:\n\t\t\tprint(\"Platform not recognized\")", "def main(directory, wavelength=16000, replace=True):\n\n if os.path.isdir(directory):\n # get the directory of mp3 files\n mpthree_files = find_directory__files(directory, 'mp3')\n\n # check whether there are mp3 files\n if len(mpthree_files) > 0:\n # converts all the mp3 files to wav files\n map(lambda x: convert_mp3_to_wav(x, replace=replace), mpthree_files.values())\n\n # now get the wav files after conversion(if any)\n wav_files = find_directory__files(directory, 'wav')\n\n # convert\n map(lambda x: convert_wavelength_file(x, wavelength=wavelength, replace=replace), wav_files.values())\n elif os.path.isfile(directory):\n\n # check if it's a wav\n filetype = find_filetype(directory)\n if filetype != 'wav':\n if filetype == 'mp3':\n convert_mp3_to_wav(directory, replace=replace)\n # get the new file name\n directory = directory.replace('mp3', 'wav')\n else:\n raise ValueError(\"Not a supported filetype at this moment\")\n\n # when filetype == wav or after converting from mp3 to wav\n convert_wavelength_file(directory, wavelength, replace=replace)\n else:\n raise ValueError(\"input is wrong\")", "def to_audio(self, _in, _out, bitrate, file_format):\n\n # Default output parameter\n # If not current directory, append '/'\n if os.path.isdir(_out):\n _out = '' if _out == '.' else _out + '/'\n _out += self.get_name_from_path(_in,\n replace=True) + '.' + file_format\n _out = _out.replace('//', '/')\n self.out = _out\n\n # File format unchecked for single inputs\n if not check_is_video(_in):\n msg = \" is not a supported media type\"\n self.abort_conversion(\n self.get_name_from_path(_in) + msg)\n\n \"\"\"\n else:\n base_name = os.path.basename(_out)\n ext = os.path.splitext(base_name)[1]\n _out = _out.replace(ext, '.mp3')\n \"\"\"\n commands = ['ffmpeg', '-i', _in,\n '-vn', '-ar', '44100',\n '-ac', '2', '-ab',\n bitrate, _out]\n try:\n self.run_convert_commands(commands)\n except FileNotFoundError as er:\n res = require_ffmepg()\n\n if not res:\n self.abort_conversion(\"Dependecy not installed.\")", "def encodeMP3(self, wavf: str, dstf: str, cover: str, meta: TrackMeta) -> None:\n FNULL = open(os.devnull, 'w')\n subprocess.call(['lame', '-V2', wavf, dstf], stdout=FNULL, stderr=FNULL)\n FNULL.close()\n # tag MP3\n mm = TrackMeta(meta)\n mp3 = MP3(dstf, ID3=ID3)\n mp3[\"TIT2\"] = TIT2(encoding=3, text=mm.title())\n mp3[\"TPE1\"] = TPE1(encoding=3, text=mm.artist())\n mp3[\"TALB\"] = TALB(encoding=3, text=mm.album())\n mp3[\"TPE2\"] = TPE2(encoding=3, text=mm.albumartist())\n if mm.date():\n mp3[\"TDRC\"] = TDRC(encoding=3, text=mm.date())\n mp3[\"TRCK\"] = TRCK(encoding=3,\n text=mm.tracknumber() + \"/\" + mm.tracktotal())\n mp3[\"TPOS\"] = TPOS(encoding=3,\n text=mm.discnumber() + \"/\" + mm.disctotal())\n\n # composer\n if mm.composer():\n mp3[\"TCM\"] = TCM(encoding=3, text=mm.composer())\n\n # cover\n if cover:\n data = open(cover, 'rb').read()\n if cover.endswith('png'):\n mime = 'image/png'\n else:\n mime = 'image/jpeg'\n mp3.tags.add(APIC(encoding=3, mime=mime, type=3, desc=u'Cover', data=data))\n\n # save\n mp3.save()", "def transcodetomp4(file_in, logger):\n\n import subprocess\n\n file_out = file_in.replace('.mkv', '.mp4')\n\n if 
os.path.isfile('/usr/bin/avconv'):\n\n convert_command = 'su securityspy -c \\\"/usr/bin/avconv -i \"{}\" -f mp4 -vcodec copy -acodec '.format(file_in) + \\\n 'libfaac -b:a 112k -ac 2 -y \"{}\"'.format(file_out) + \"\\\"\"\n\n try:\n subprocess.check_call(convert_command, shell=True)\n except subprocess.CalledProcessError:\n logger.error(\"The command to transcode: {} --- failed...\".format(convert_command))\n return file_in\n\n return file_out\n else:\n return file_in\n # fin", "def audio_pipeline(wav):\n sig = sb.dataio.dataio.read_audio(wav)\n return sig", "def audio_pipeline(wav):\n sig = sb.dataio.dataio.read_audio(wav)\n return sig", "def read_audio(f, downmix):\n if f.endswith('.mp3'):\n f = _mp3_hook(f)\n sr, audio = scipy.io.wavfile.read(f)\n if not audio.dtype is np.float32:\n audio = _normalize_pcm(audio)\n if downmix and len(audio.shape) == 2:\n audio = down_mix(audio)\n return sr, audio" ]
[ "0.73525465", "0.7225452", "0.7119044", "0.70570403", "0.67239785", "0.6696764", "0.6579686", "0.6547333", "0.6539131", "0.6505683", "0.6481485", "0.64209044", "0.6362076", "0.6354701", "0.632691", "0.6275299", "0.62359387", "0.6216278", "0.6215746", "0.61728823", "0.6155799", "0.6154977", "0.61340415", "0.6125287", "0.61050206", "0.60945886", "0.6072155", "0.60684216", "0.60684216", "0.60557544" ]
0.8660163
0
Limit arrays of frequency and features by maximum frequency and bottom frequency.
def limit_by_freq(freq, features, upper_limit, lower_limit=None):
    # Copy into arrays, in order to apply mask
    freq = np.array(freq, dtype=np.float)
    features = np.array(features, dtype=np.float)
    # Mask for bottom limit
    if lower_limit is not None:
        bottom_mask = freq >= lower_limit
        features = features[bottom_mask]
        freq = freq[bottom_mask]
    # Mask for upper limit
    upper_mask = freq <= upper_limit
    features = features[upper_mask]
    freq = freq[upper_mask]
    return freq, features
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findMaximal(freqSet):", "def __restrict_features_freq(self, min_count=1):\n col_idx = self.X.tocsc().nonzero()[1]\n counter = np.bincount(col_idx)\n print(\"Counter:\", len(counter))\n include_cols = np.where(counter > min_count)[0]\n return include_cols", "def fit_features(data, max_features):\n ndata = []\n for rec in data:\n rec = list(rec)\n if len(rec) > max_features:\n rec = rec[:max_features]\n elif len(rec) < max_features:\n rec = rec + (max_features - len(rec)) * [0.0]\n ndata.append(rec)\n return np.array(ndata)", "def calcUpperFrequencyLimit(fls, noct, max_idx):\n # floats required due to integer division in Python 2.7\n f_upper = fls[0:max_idx] * (2.0 ** (1.0 / (2.0 * noct)))\n step_size = fls[1] - fls[0]\n approx_idx = f_upper / float(step_size)\n f_upper = np.round(approx_idx).astype(int)\n return f_upper", "def _cutoff(xdata, ydata, btype, fs, ff):\r\n try:\r\n# print ff\r\n if ff != None:\r\n nPts = int(1./(((xdata.max()-xdata.min())/xdata.shape[0])*(ff/10.)))\r\n else:\r\n nPts = 0\r\n if nPts%2 == 0:\r\n nPts = nPts + 1\r\n if nPts < xdata.shape[0]:\r\n nPts = xdata.shape[0]\r\n# print nPts\r\n window = np.hanning(ydata.shape[0])\r\n freq = FourierFrequency(xdata, nPts)\r\n index = np.argsort(freq)\r\n tdf = FourierTransform(ydata*window, nPts)\r\n tdf = abs(tdf)\r\n pp = _maxima(tdf[index], freq[index], lookahead = 1)\r\n# mm = _minima(tdf[index], freq[index], lookahead=1)\r\n pp, hh = np.array(np.array(pp).T[0]), np.array(np.array(pp).T[1])\r\n# mm = np.array(np.array(mm).T[0])#, np.array(np.array(mm).T[1])\r\n ind = np.where(pp == min(abs(pp)))[0][0]\r\n ind2 = np.where(hh == max(hh[(ind+1):]))[0][0]\r\n for u, i in enumerate(freq):\r\n if i > abs(pp[ind2])*1.5 or i < -abs(pp[ind2])*1.5 or (i < abs(pp[ind2])/2. and i > -abs(pp[ind2])/2.) 
or (tdf[u] > hh[ind2]*1.05): #(abs(i) < abs(mm[indmin])) or \r\n tdf[u] = 0.\r\n def lor2(x, A0, x0, gamma0):\r\n return A0*(1/np.pi)*(gamma0/2)/((x-x0)**2+(gamma0/2)**2)+A0*(1/np.pi)*(gamma0/2)/((x+x0)**2+(gamma0/2)**2)\r\n lmod2 = lmf.Model(lor2)\r\n lmod2.make_params()\r\n lmod2.set_param_hint('A0', value=max(tdf), min=max(tdf)/1000.)\r\n lmod2.set_param_hint('x0', value=abs(pp[ind2]), min=0.)\r\n lmod2.set_param_hint('gamma0', value=1., min=0.)\r\n result2 = lmod2.fit(tdf[index], x=freq[index])\r\n# print result2.values.get('x0'), result2.values.get('gamma0')\r\n if btype=='high':\r\n if result2.values.get('x0')-result2.values.get('gamma0') > 0.:\r\n# print \"frequency: \", result2.values.get('x0')-result2.values.get('gamma0')\r\n if hh[ind2] != max(hh[(ind+1):]):\r\n print \"False\", \" maximum\", \"\\n\", \"\\n\", \"\\n\"\r\n return result2.values.get('x0')-result2.values.get('gamma0')\r\n else:\r\n# print \"failed: 0\"\r\n return 0.\r\n elif btype=='low':\r\n return result2.values.get('x0')+result2.values.get('gamma0')\r\n except Exception:\r\n pass\r\n finally:\r\n pass", "def max_frequency(sig, FS):\n\n f, fs = plotfft(sig, FS)\n t = np.cumsum(fs)\n\n try:\n ind_mag = np.where(t > t[-1]*0.95)[0][0]\n except:\n ind_mag = np.argmax(t)\n f_max = f[ind_mag]\n\n return f_max", "def filtermax(f, maxfiltsize=10):\n # Maximum filter to ignore deeper fluxes of absorption lines\n f_maxfilt = maximum_filter1d(f, size=maxfiltsize)\n # Find points selected by maximum filter\n idxmax = np.array([i for i in range(len(f)) if f[i]-f_maxfilt[i] == 0.])\n\n return f_maxfilt, idxmax", "def FoldChangeFilterBasedOnMaxFC(X, data_headers, cutoff=0.5):\n XX = Linear(X.copy(), data_headers)\n X_ToMin = XX[data_headers] / XX[data_headers].min(axis=0)\n Xidx = np.any(X_ToMin.values >= X_ToMin.max().values * cutoff, axis=1)\n return X.iloc[Xidx, :]", "def spec_to_peaks(data, value, fp = iterate_structure(generate_binary_structure(rank = 2, connectivity=2), 10)):\n\n max_arr = maximum_filter(data, footprint = fp)\n return (data == max_arr) & (data > value)", "def prune(self, min_freq):\n new_forward = {}\n new_backward = [\"OOV\"]\n new_freq = [0]\n j = 1\n for i in xrange(1,len(self.backward)):\n f = self.backward[i]\n if self.freq[i] >= min_freq:\n new_forward[f] = j\n new_backward.append(f)\n new_freq.append(self.freq[i])\n j += 1\n self.forward = new_forward\n self.backward = new_backward\n self.freq = new_freq\n self.counter = j", "def rough_frequency_samples(m1, m2, flow, fmax, df_min):\n kmin = int(flow / df_min)\n kmax = int(fmax / df_min)\n k = kmin\n ksamples = []\n while k < kmax:\n ksamples.append(k)\n k += int(1.0 / rough_time_estimate(m1, m2, k * df_min) / df_min)\n ksamples.append(kmax)\n return numpy.array(ksamples)", "def _compare_indices(self, frequency_array):\n frequency_array = np.absolute(frequency_array)\n max_frequency = np.max(frequency_array)\n return max_frequency", "def mode(x: List[float]) -> List[float]:\n counts = Counter(x)\n max_count = max(counts.values())\n return [x_i for (x_i, count) in counts.items() if count == max_count]", "def maximums(self):\n # The maximums are at the peaks position but might be swallowed by \n # other peaks\n maximums = list()\n for func, pos, height, width in zip(self.peaks_function,\n self.peaks_position,\n self.peaks_height,\n self.peaks_width):\n val = func(pos, pos, height, width)\n if val >= self.__call__(pos, count=False)[0]:\n maximums.append((val, pos))\n return sorted(maximums, reverse=True)", "def max_counts(self):\n\n return 
np.nanmax(self.pre_proc_data)", "def set_maximum(freq, rg = None):\n if isinstance(rg, int):\n rg = [rg]\n elif rg is None:\n rg = _cpu.get_online_cpus()\n\n for core in rg:\n try:\n _cpu.set_max_frequencies(freq, core)\n if _verbose:\n print(f\"CPU {core} maximum frequency set to {int(freq/1000)} MHz.\")\n except Exception as e:\n print(f\"ERROR: An exception occurred for CPU {core}.\")\n print(e)", "def last_high(values):\n length = len(values)\n arr = np.zeros(length, dtype=np.int32)\n max_val = values[0]\n counter = 0\n for i in np.arange(1, length):\n if values[i] > max_val:\n max_val = values[i]\n counter = i\n arr[i] = counter\n return arr", "def mask(self):\n\n mask = self.freqs >= self.minimum_threshold\n mask = mask.astype(int)\n self.freqs = self.freqs * mask\n self.sums = self.sums * mask", "def temporal_ideal_filter(tensor,low,high,fps,axis=0): \n fft=fftpack.fft(tensor,axis=axis)\n frequencies = fftpack.fftfreq(tensor.shape[0], d=1.0 / fps)\n bound_low = (np.abs(frequencies - low)).argmin()\n bound_high = (np.abs(frequencies - high)).argmin()\n if (bound_low==bound_high) and (bound_high<len(fft)-1):\n bound_high+=1\n fft[:bound_low] = 0\n fft[bound_high:-bound_high] = 0\n fft[-bound_low:] = 0\n iff=fftpack.ifft(fft, axis=axis)\n \n return np.abs(iff)", "def peak(data, fft_data=None):\n return np.max(np.abs(data))", "def remove_low_info(X, max_frequency=0.99):\n selector = UniqueThreshold(max_frequency=max_frequency)\n return selector.fit_transform(X)", "def exclude_largest(self):\n mask = np.copy(self.array)\n vols = [np.sum(p) for p in self]\n ilarge = np.argmax(vols)+1 # pore types are 1-indexed\n mask[self.labels == ilarge] = 0\n self.set_array(mask, structure=self._structure)", "def apply_freq_filter(self, min_freq):\n self._apply_filter(lambda ng, freq: freq < min_freq)", "def setUpperFrequencyBound(self, new_bound: int) -> None:\n self.upper_frequency_bound = new_bound", "def remove_exceeding_samples(states_accumulator,\n policy_accumulator,\n value_prior_accumulator,\n max_samples_per_result_to_train):\n for ires in range(len(states_accumulator)):\n if len(states_accumulator[ires]) > \\\n max_samples_per_result_to_train:\n diff = len(states_accumulator[ires]) - \\\n max_samples_per_result_to_train\n states_accumulator[ires] = \\\n states_accumulator[ires][diff:]\n policy_accumulator[ires] = \\\n policy_accumulator[ires][diff:]\n value_prior_accumulator[ires] = \\\n value_prior_accumulator[ires][diff:]\n return states_accumulator, policy_accumulator, value_prior_accumulator", "def prune_features(self):\r\n for i, features in enumerate(self.curr_features):\r\n # Continue if the number of features in this grid does\r\n # not exceed the upper bound.\r\n if len(features) <= self.config.grid_max_feature_num:\r\n continue\r\n self.curr_features[i] = sorted(features, key=lambda x:x.lifetime, \r\n reverse=True)[:self.config.grid_max_feature_num]", "def max_map(freq_map):\n\n max_val = max(freq_map.values())\n return max_val", "def peak_enhance(signal, peaks, window: int = 0.08, fs: int = processing.FS):\n window = int(fs * window)\n if not window % 2 == 0:\n window += 1\n enhanced_peaks = np.zeros(len(peaks), dtype=int)\n signal = np.abs(signal)\n for i, peak in enumerate(peaks):\n if peak < window // 2:\n enhanced_peaks[i] = np.argmax(signal[0:peak + window // 2 + 1])\n elif peak + window // 2 + 1 > signal.shape[0]:\n enhanced_peaks[i] = np.argmax(signal[peak - window // 2:]) + peak - window // 2\n else:\n # Because of one-side lag -> window: p - w * 0.25% : p + w * 75%\n 
enhanced_peaks[i] = np.argmax(signal[peak - window // 4:peak + 3 * window // 4]) + peak - window // 4\n\n return enhanced_peaks", "def clean(data, N_peaks, f_interval=None, f_resolution=None, sampling=None, w_column=None):\n print('-------------------------- clean')\n \n # Avoid overwritting data:\n data0 = data.copy()\n\n # Standard frequency resolution:\n T = data0[-1,0]-data[0,0]\n if f_resolution==None:\n f_resolution = 1/T\n \n # Avoid 0 as input as not peaks are found:\n if f_interval[0]==0:\n f_interval = [f_resolution, f_interval[1]]\n \n # Constants:\n SAMPLING = 1\n f_RES = 0.1*f_resolution # Standard frequency resolution\n picon = 2*np.pi*data0[:,0] # Optimization constant\n f_peaks = np.zeros(N_peaks)\n A_peaks = np.zeros(N_peaks)\n \n for i in range(N_peaks):\n k = i+1\n print '%s. Peak' %k\n\n # 1. Iteration - start finding largest peak:\n Pf_power, _, _, _, = tt.power(data0, f_interval, f_resolution, sampling, w_column)\n f = Pf_power[:,0]; P = Pf_power[:,1]; j = np.nanargmax(P)\n f_int = (f[j-1], f[j+1]) # Smaller f_int (Tuple instead of array for optimization)\n\n # Testing that the frequency resolution > sigma_f to continue:\n A_peak = P[j]\n A_av = np.mean(np.sqrt(P))\n sigma_a = 0.8*A_av\n sigma_phi = sigma_a/A_peak\n sigma_f = np.sqrt(3)*sigma_phi/(np.pi*T)\n if f_RES>sigma_f: \n \n # 2. Iteration: uses now f_res and so on..\n Pf_power, _, _, _, = tt.power(data0, f_int, f_RES, SAMPLING, w_column)\n f = Pf_power[:,0]; P = Pf_power[:,1]; j = np.nanargmax(P)\n f_int = (f[j-1], f[j+1])\n \n # 3. Iteration: last\n Pf_power, P_comp, _, _, = tt.power(data0, f_int, f_RES, SAMPLING, w_column)\n f = Pf_power[:,0]; P = Pf_power[:,1]; j = np.nanargmax(P)\n fpicon = picon*f[j] # Optimization constant\n alpha = P_comp[:,0]; beta = P_comp[:,1]\n alpha0 = alpha[j]*np.sin(fpicon)\n beta0 = beta[j]* np.cos(fpicon)\n data0[:,1] = data0[:,1] - alpha0 - beta0\n f_peaks[i] = f[j]\n A_peaks[i] = np.sqrt(P[j])\n\n # Output:\n St_clean = data0\n print f_peaks, A_peaks\n return St_clean, f_peaks, A_peaks", "def lowpass_max_frequency(st, fn_fac=0.75, lp_max=40.0, config=None):\n if not st.passed:\n return st\n\n def _cap_lowpass(fc):\n freq_dict = tr.getParameter(\"corner_frequencies\")\n if freq_dict[\"lowpass\"] > fc:\n freq_dict[\"lowpass\"] = fc\n tr.setParameter(\"corner_frequencies\", freq_dict)\n\n for tr in st:\n if tr.passed:\n if tr.hasParameter(\"review\"):\n rdict = tr.getParameter(\"review\")\n if \"corner_frequencies\" in rdict:\n rev_fc_dict = rdict[\"corner_frequencies\"]\n if \"lowpass\" in rev_fc_dict:\n logging.warning(\n f\"Not applying lowpass_max_frequency for {tr} because the \"\n \"lowpass filter corner was set by manual review.\"\n )\n continue\n\n fn = 0.5 * tr.stats.sampling_rate\n lp_max_fn = fn * fn_fac\n _cap_lowpass(lp_max_fn)\n _cap_lowpass(lp_max)\n\n return st" ]
[ "0.6321469", "0.5905518", "0.5899971", "0.5845656", "0.5827781", "0.5711425", "0.55240804", "0.5505938", "0.54641736", "0.5451525", "0.5440867", "0.5408901", "0.5367295", "0.53634137", "0.5352045", "0.5324692", "0.53187746", "0.53122324", "0.5303038", "0.52815557", "0.5275447", "0.52573454", "0.52492553", "0.5232467", "0.52100176", "0.51902336", "0.5171352", "0.51594007", "0.51539475", "0.51530427" ]
0.72728205
0
Check if song is already transformed into temp.
def check_wav(song, source_folder, temp_folder, encoder='mpg123'):
    # Name of files
    song_name, extension = os.path.splitext(song)
    mp3_file = os.path.join(source_folder, song)
    if '.wav' != extension:
        wav_file = os.path.join(temp_folder, song_name + '.wav')
        try:
            if not os.path.isfile(wav_file):
                mp3_to_wav(
                    mp3_file=mp3_file,
                    wav_file=wav_file,
                    encoder=encoder)
            else:
                pass
        except MemoryError:
            logger.error('MemoryError: %s MP3 couldn\'t be transformed into WAV', song_name)
    else:  # Already a wav file
        copyfile(mp3_file, os.path.join(temp_folder, song_name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isTemp(self,object):\n return (object in self.tempObjects)", "def test_transform_track_album_based_on_album_title_no_match_album(self):\n track = Track(artist='Artist', album='Album 3', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_album=True, pattern_album = 'Album',\n cond_title=True, pattern_title='Title',\n change_album=True, to_album='Album 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album 3')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)", "def song_check(song):\n msg = choose_song(song)\n return msg != ERROR", "def test_transform_track_album_based_on_album_title_no_match_title(self):\n track = Track(artist='Artist', album='Album', title='Title 2',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_album=True, pattern_album = 'Album',\n cond_title=True, pattern_title='Title',\n change_album=True, to_album='Album 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Title 2')\n self.assertEqual(track.transformed, False)", "def test_apply_transform_single_album_no_match(self):\n album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=120)\n pk = album.insert(self.app.db, self.app.curs)\n tf_pk = self.add_transform(cond_artist=True, pattern_artist='Foo',\n change_artist=True, to_artist='Bar')\n self.assertNotEqual(tf_pk, 0)\n self.app.load_data()\n\n row = self.get_album_by_id(pk)\n self.assertEqual(row['lasttransform'], 0)\n\n for line in self.app.apply_transforms():\n pass\n\n row = self.get_album_by_id(pk)\n self.assertEqual(row['lasttransform'], tf_pk)", "def test_transform_track_title_based_on_album_title_no_match_album(self):\n track = Track(artist='Artist', album='Album 2', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_album=True, pattern_album = 'Album',\n cond_title=True, pattern_title='Title',\n change_title=True, to_title='Title 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album 2')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)", "def test_single_track_with_transform(self):\n tf_pk = self.add_transform(cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2')\n self.app.load_data()\n\n self.add_mp3()\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, True)\n self.assertEqual(self.get_album_count(), 1)\n album = Album.get_by_artist_album(self.app.curs, 'Artist 2', 'Album')\n self.assertNotEqual(album, None)\n self.assertEqual(album.artist, 'Artist 2')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.album_type, 'album')\n self.assertEqual(album.totalseconds, 2)\n self.assertEqual(album.totaltracks, 1)\n self.assertEqual(album.last_transform, tf_pk)", "def test_apply_transform_single_track_no_match(self):\n track = Track(artist='Artist', title='Title')\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n tf_pk = self.add_transform(cond_artist=True, pattern_artist='Foo',\n change_artist=True, 
to_artist='Bar')\n self.assertNotEqual(tf_pk, 0)\n self.app.load_data()\n\n row = self.get_track_by_id(pk)\n self.assertEqual(row['lasttransform'], 0)\n\n for line in self.app.apply_transforms():\n pass\n\n row = self.get_track_by_id(pk)\n self.assertEqual(row['lasttransform'], tf_pk)", "def test_conversion():\n file = 'Sherlock OST/SHERlocked.mp3'\n new_name = cr.SingleSong_conversion(file)\n assert new_name[0].split('/')[-1] in os.listdir(os.path.split(file)[0])", "def test_repair_file(self):\n\n audio_path = self.converter.audio\n self.assertTrue(audio_path.endswith('.wav'))\n # Make sure it can be loaded in moviepy\n clip = AudioFileClip(audio_path)", "def test_no_transform_track_with_already_applied_transform(self):\n track = Track(artist='Artist', title='Title')\n track.last_transform = 1\n tflist = TransformList()\n tflist.add_transform(Transform(1,\n cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2',\n ))\n\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.transformed, False)\n\n tflist.apply_track(track)\n\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.transformed, False)", "def replace(self):\n if self.success is False:\n raise TaskError('not ready')\n try:\n temp_src = '/tmp/' + str(random.randint(10000, 99999)) + '.mp3'\n os.move(self.source, temp_src)\n os.move(self.target, self.source)\n os.unlink(temp_src)\n except OSError as e:\n print(e)", "def test_transform_track_album_based_on_artist_album_no_match_album(self):\n track = Track(artist='Artist', album='Album 3', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist='Artist',\n cond_album=True, change_album=True,\n pattern_album='Album', to_album='Album 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album 3')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)", "def test_no_transform_track_with_song_with_transform_id_greater(self):\n track = Track(artist='Artist', title='Title')\n track.last_transform = 1\n tflist = TransformList()\n tflist.add_transform(Transform(1,\n cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2',\n ))\n\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.transformed, False)\n\n tflist.apply_track(track)\n\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.transformed, False)", "def test_no_transform_album_with_already_applied_transform(self):\n album = Album(artist='Artist', album='Album', last_transform=1)\n tflist = TransformList()\n tflist.add_transform(Transform(1,\n cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2',\n ))\n\n self.assertEqual(album.last_transform, 1)\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.transformed, False)\n\n tflist.apply_album(album)\n\n self.assertEqual(album.last_transform, 1)\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.transformed, False)", "def cut_and_eq(song_name):\r\n print(\"[{}] STATUS: Loading...\".format(song_name))\r\n sound_file = AudioSegment.from_mp3(song_name)\r\n print(\"[{}] STATUS: Loaded, now 
processing...\".format(song_name))\r\n sound_file = match_target_amplitude(sound_file, TARGET_VOLUME) # Amplify beforehand to prevent over-zealous cutting\r\n chunks = split_on_silence(sound_file, SILENCE_CUTOFF, THRESHOLD, keep_silence=ACCEPTABLE_SILENCE)\r\n\r\n if len(chunks) > 1:\r\n print(\"[{}] ERROR: Too many chunks ({}) cannot export\".format(song_name, len(chunks)))\r\n return song_name\r\n else:\r\n output = AudioSegment.empty()\r\n for chunk in chunks:\r\n output += chunk\r\n\r\n new_name = song_name.split(\".\")[0]\r\n print(\"[{}] STATUS: Processed, now exporting...\".format(song_name))\r\n metadata = mediainfo(song_name).get('TAG',{})\r\n output.export(OUTPUT_NAME_FORMAT.format(new_name), format=OUTPUT_FORMAT, tags=metadata)\r\n print(\"[{}] STATUS: Exported to {} - cleaned.{}\".format(song_name, new_name, OUTPUT_FORMAT))\r\n return None", "def check_already_extracted(video_parts):\n filename_no_ext, _ = video_parts\n return bool(os.path.exists(os.path.join(output_dir,\n filename_no_ext + '-0030.jpg')))", "def test_transform_track_title_based_on_album_title_no_match_title(self):\n track = Track(artist='Artist', album='Album', title='Title 3',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_album=True, pattern_album = 'Album',\n cond_title=True, pattern_title='Title',\n change_title=True, to_title='Title 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Title 3')\n self.assertEqual(track.transformed, False)", "def _do_move(self, artist, album, song):\n try:\n move_to = \"{0}{1}/{2}/\".format(self.dupe_dir, \n artist, album)\n if not os.path.exists(move_to):\n os.makedirs(move_to)\n \n shutil.move(song['path'], move_to)\n self.moved.append(song)\n return 1\n except:\n self.logger.error(\"Could not move file: {0}\".format(str(song['path'])))\n return 0", "def test_transform_track_title_based_on_artist_album_no_match_album(self):\n track = Track(artist='Artist', album='Album 2', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist='Artist',\n cond_album=True, pattern_album = 'Album',\n change_title=True, to_title='Title 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album 2')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)", "def test_transform_track_album_based_on_artist_album_no_match_artist(self):\n track = Track(artist='Artist 2', album='Album', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist='Artist',\n cond_album=True, change_album=True,\n pattern_album='Album', to_album='Album 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist 2')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)", "def test_transform_track_artist_based_on_artist_album_no_match_album(self):\n track = Track(artist='Artist', album='Album 2', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist = 'Artist',\n cond_album=True, pattern_album='Album',\n change_artist=True, 
to_artist='Artist 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album 2')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)", "def remix(self):\n self.log(\"Looking up track...\", 5)\n self.getTag()\n self.processArt()\n\n self.log(\"Listening to %s...\" % ('\"%s\"' % self.tag['title'] if 'title' in self.tag else 'song'), 5)\n self.original = audio.LocalAudioFile(self.infile, False)\n if not 'title' in self.tag:\n self.detectSong(self.original)\n self.st = FastModify()\n \n self.log(\"Choosing key and tempo...\", 10)\n self.tonic = self.original.analysis.key['value']\n self.tempo = self.original.analysis.tempo['value']\n self.bars = self.original.analysis.bars\n self.beats = self.original.analysis.beats\n self.sections = self.original.analysis.sections\n self.tag['key'] = self.keys[self.tonic] if self.tonic >= 0 and self.tonic < 12 else '?'\n self.tag['tempo'] = self.template['tempo']\n\n self.log(\"Arranging intro...\", 40.0/(len(self.sections) + 1))\n self.partialEncode(self.compileIntro())\n\n past_progress = 0\n hats = audio.AudioData(self.sample_path + self.template['hats'], sampleRate=44100, numChannels=2, verbose=False)\n\n i = 0 # Required if there are no sections\n for i, section in enumerate(self.sections):\n self.log(\"Arranging section %s of %s...\" % (i+1, len(self.sections)), 40.0/(len(self.sections) + 1))\n a, b = self.compileSection(i, section, hats)\n self.partialEncode(a)\n self.partialEncode(b)\n del a, b\n del hats\n self.original.unload()\n\n self.log(\"Adding ending...\", 5)\n self.partialEncode(\n audio.AudioData(\n self.sample_path + self.template['splash_ends'][(i + 1) % len(self.template['splash_ends'])],\n sampleRate=44100,\n numChannels=2,\n verbose=False\n )\n )\n \n self.log(\"Mixing...\", 5)\n self.mixwav(self.tempfile)\n\n if self.deleteOriginal:\n try:\n unlink(self.infile)\n except:\n pass # File could have been deleted by an eager cleanup script\n\n self.log(\"Mastering...\", 5)\n self.lame(self.tempfile, self.outfile)\n unlink(self.tempfile)\n \n self.log(\"Adding artwork...\", 20)\n self.updateTags(titleSuffix = \" (Wub Machine Remix)\")\n \n return self.outfile", "def test_apply_transform_single_track_match(self):\n track = Track(artist='Artist', title='Title')\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n tf_pk = self.add_transform(cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='New Artist')\n self.assertNotEqual(tf_pk, 0)\n self.app.load_data()\n\n row = self.get_track_by_id(pk)\n self.assertEqual(row['lasttransform'], 0)\n\n for line in self.app.apply_transforms():\n pass\n\n row = self.get_track_by_id(pk)\n self.assertEqual(row['lasttransform'], tf_pk)\n self.assertEqual(row['artist'], 'New Artist')", "def test_transform_album_no_changes(self):\n album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=60)\n transform = Transform(1, cond_artist=True, change_artist=True,\n pattern_artist='Foo', to_artist='Bar')\n\n self.assertEqual(album.last_transform, 0)\n transform.apply_album(album)\n self.assertEqual(album.last_transform, 1)\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.transformed, False)", "def test_get_all_need_transform_one_track_another_already_applied(self):\n track = Track(artist='Artist', 
album='Album', title='Title', last_transform=1)\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n track = Track(artist='Artist', album='Album', title='Title')\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n self.assertEqual(self.get_track_count(), 2)\n\n tracks = Track.get_all_need_transform(self.app.curs, 1)\n self.assertEqual(len(tracks), 1)\n self.assertEqual(tracks[0].pk, pk)", "def _alreadyProcessed(self, tiltseriesdata):\n\t\tseriesname = \"series%3d\" % (tiltseriesdata['number'])\n\t\tself._reloadDoneDict()\n\t\tif seriesname in self.donedict:\n\t\t\tif not self.stats['lastseries_skipped']:\n\t\t\t\tsys.stderr.write(\"skipping series\\n\")\n\t\t\telif self.stats['skipcount'] % 80 == 0:\n\t\t\t\tsys.stderr.write(\".\\n\")\n\t\t\telse:\n\t\t\t\tsys.stderr.write(\".\")\n\t\t\tself.stats['lastseries_skipped'] = True\n\t\t\tself.stats['skipcount'] += 1\n\t\t\tself.stats['count'] += 1\n\t\t\treturn True\n\t\telse:\n\t\t\tself.stats['waittime'] = 0\n\t\t\tif self.stats['lastseries_skipped']:\n\t\t\t\tapDisplay.printMsg(\"\\nskipped\"+str(self.stats['skipcount'])+\" series so far\")\n\t\t\tself.stats['lastseries_skipped']=False\n\t\t\treturn False\n\t\treturn False", "async def async_is_playing_new_track(self):\n if self._playing_mediabrowser and self._media_source_uri is not None:\n # don't trigger new track flag for local mediabrowser files\n return False\n \n if self._icecast_name != None:\n import unicodedata\n artmed = unicodedata.normalize('NFKD', str(self._media_artist) + str(self._media_title)).lower()\n artmedd = u\"\".join([c for c in artmed if not unicodedata.combining(c)])\n if artmedd.find(self._icecast_name.lower()) != -1 or artmedd.find(self._source.lower()) != -1:\n # don't trigger new track flag for icecast streams where track name contains station name or source name; save some energy by not quering last.fm with this\n self._media_image_url = None\n return False\n\n if self._media_artist != self._media_prev_artist or self._media_title != self._media_prev_title:\n return True\n else:\n return False", "def test_transform_track_title_based_on_artist_album_no_match_artist(self):\n track = Track(artist='Artist 2', album='Album', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist='Artist',\n cond_album=True, pattern_album = 'Album',\n change_title=True, to_title='Title 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist 2')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)", "def test_transform_album_album_based_on_artist_album_no_match_album(self):\n album = Album(artist='Artist', album='Album 3',\n totaltracks=1, totalseconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist='Artist',\n cond_album=True, change_album=True,\n pattern_album='Album', to_album='Album 2')\n\n self.assertEqual(album.last_transform, 0)\n transform.apply_album(album)\n self.assertEqual(album.last_transform, 1)\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album 3')\n self.assertEqual(album.transformed, False)" ]
[ "0.56140214", "0.54819727", "0.542196", "0.5414857", "0.5397536", "0.5393968", "0.5390524", "0.53878826", "0.53857505", "0.5385733", "0.5346858", "0.5340154", "0.53382695", "0.5315191", "0.5310992", "0.53011787", "0.52986276", "0.5296976", "0.5272015", "0.5269272", "0.5257539", "0.524134", "0.52334416", "0.5214529", "0.5213078", "0.5204291", "0.5201751", "0.5199436", "0.5190387", "0.5173247" ]
0.5619942
0
Return the free space in gigabytes.
def get_free_gb():
    mem_info = get_mem_info()
    free_gb = float(mem_info['MemAvailable'].value) / 10**6
    return free_gb
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cgts_vg_free_space():\n\n try:\n # Determine space in cgts-vg in GiB\n vg_free_str = subprocess.check_output( # pylint: disable=not-callable\n ['vgdisplay', '-C', '--noheadings', '--nosuffix',\n '-o', 'vg_free', '--units', 'g', 'cgts-vg'],\n close_fds=True, universal_newlines=True).rstrip()\n cgts_vg_free = int(float(vg_free_str))\n except subprocess.CalledProcessError:\n LOG.error(\"Command vgdisplay failed\")\n raise Exception(\"Command vgdisplay failed\")\n\n return cgts_vg_free", "def _get_free_capacity(self):\n\n capacity = np.ones(len(self.grid.T)) * len(self.grid)\n capacity -= np.count_nonzero(self.grid, axis=0)\n return capacity", "def get_space_used():\n fs.get_space_used()", "def get_available_space(self):\n return self.maxsize - len(self)", "def gb(self):\n return self.data.gb", "def free_ram():\n return int(convert.bytetomb(psutil.virtual_memory().available))", "def _free_space() -> int:\n return disk_usage(realpath('/')).free", "def used_ram():\n return total_ram() - free_ram()", "def get_free(self):\n return int(self.free_cores)", "def freespace(self):\n self.log.info(\"freespace\")\n freebytes = shutil.disk_usage(self.s3_dir).free\n self.log.info(\"returning:\" + str(freebytes))\n return freebytes", "def get_free_mem(self):\n return self.free_mem", "def get_free(self):\r\n\t\treturn len(self.free_objects)", "def fs_total_reserved_space(self):\n return self._fs_total_reserved_space", "def account_space(access_token):\n client = dropbox.client.DropboxClient(access_token)\n account_info = client.account_info()\n quota_info = account_info['quota_info']\n total = quota_info['quota']\n used = quota_info['normal'] + quota_info['shared']\n return total - used", "def __get_free_system_gid(self):\n\n gid_min, gid_max = self.__get_system_group_gid_range()\n\n busy_gids = [x.gr_gid for x in grp.getgrall() if gid_min <= x.gr_gid <= gid_max]\n\n # find free system gid\n for gid in range(gid_min, gid_max + 1):\n if gid not in busy_gids:\n return gid", "def get_free_space(dirname):\n return psutil.disk_usage(dirname).free", "def count_free_gpus():\n return len(get_free_gpus())", "def calc_free_g(energies, temperatures):\n pass", "def capacity_gb(self) -> str:\n return pulumi.get(self, \"capacity_gb\")", "def _available_space( self, pool_name ):\n\t\ttry:\n\t\t\treturn self.storage_pools[pool_name].available\n\t\texcept KeyError:\n\t\t\treturn -1", "def get_free_ram():\n try:\n output = subprocess.check_output(['free', '-b']).decode(\"utf-8\")\n lines = output.splitlines()\n m = re.match(r'\\w+:' + '(\\s+(\\d+))'*6, lines[1])\n if m:\n return int(m.group(6))\n except OSError:\n pass\n sys.stderr.write(\"Warning: Unable to determine free RAM, using 1GB\\n\")\n return 10**9", "def getSpaceUsage(path):\n st = os.statvfs(path)\n \n flash = { \"free\" : st.f_bavail * st.f_frsize, \"used\":(st.f_blocks - st.f_bfree) * st.f_frsize }\n \n #free = st.f_bavail * st.f_frsize\n #total = st.f_blocks * st.f_frsize\n #used = (st.f_blocks - st.f_bfree) * st.f_frsize\n return flash", "def get_free_slots(self):\n # pon_ports = keys(self.uncfg_onu)\n pass # TODO", "def available_space(self):\r\n space = dict()\r\n for path in self._mounts.keys():\r\n space.update({path:self.available_space_for_path(path)})\r\n return space", "def gagged(self):\r\n return self._gag", "def available_space(self):\n # From http://stackoverflow.com/a/787832/732596\n s = os.statvfs(self.path)\n return (s.f_bavail * s.f_frsize) / 1024**2", "def get_free_space(directory):\r\n if sys.platform in [\"win32\", \"cygwin\"]:\r\n 
free_bytes = ctypes.c_ulonglong(0)\r\n ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(directory),\r\n None, None, ctypes.pointer(free_bytes))\r\n space = free_bytes.value\r\n else:\r\n space = os.statvfs(directory).f_bfree * os.statvfs(directory).f_frsize\r\n\r\n return format_size(space)", "def memory(self):\n mem_size_list = []\n gig_size = self.random.randint(1,32)\n size = gig_size * 1073741824\n suffixes=['B','KB','MB','GB','TB']\n suffixIndex = 0\n while size > 1024 and suffixIndex < 4:\n suffixIndex += 1 #increment the index of the suffix\n size = size/1024.0 #apply the division\n mem_size_list.append(f\"{size:.2f} {suffixes[suffixIndex]}\")\n return mem_size_list", "def available_capacity(self):\r\n return self.capacity - len(self.passengers)", "def getSpace(self):\n return self.space" ]
[ "0.7192137", "0.6909616", "0.690619", "0.68786997", "0.6813705", "0.67277247", "0.66669893", "0.6577275", "0.6504887", "0.65012854", "0.64604944", "0.6451814", "0.6396979", "0.63525146", "0.63032967", "0.6267119", "0.6259522", "0.62487036", "0.6246622", "0.62347066", "0.6206821", "0.6199555", "0.6154595", "0.6153875", "0.615194", "0.6122632", "0.60965425", "0.6084616", "0.6078471", "0.60578483" ]
0.75012624
0
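For context on the get_free_gb snippet above: its get_mem_info helper is not shown in this row, so its exact return type is an assumption. A rough, self-contained sketch (assuming a Linux host, where /proc/meminfo reports MemAvailable in kB) could compute the same value like this:

import re

def get_free_gb_standalone():
    # Sketch only: parse MemAvailable directly from /proc/meminfo.
    # /proc/meminfo reports kB, so kB / 10**6 gives decimal gigabytes,
    # matching the division used in the row above.
    with open('/proc/meminfo') as f:
        for line in f:
            if line.startswith('MemAvailable:'):
                kb = float(re.split(r'\s+', line.strip())[1])
                return kb / 10**6
    raise RuntimeError('MemAvailable not found in /proc/meminfo')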
True if it can't run, False otherwise. The condition is the amount of RAM available, in GB.
def ram_condition(min_gb=3): return get_free_gb() < min_gb
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_if_sufficient_memory():\n percent_memory = psutil.virtual_memory().percent\n if percent_memory > 75:\n raise ValueError('Please use a device with more CPU ram or a smaller dataset')", "def memory_check(self) -> bool:\n available_bytes = psutil.virtual_memory().available\n cur_rss = self.mem_status.memory_info().rss\n\n if cur_rss < self.init_mem_rss:\n self.init_mem_rss = cur_rss\n estimated_model_size_mb = (cur_rss - self.init_mem_rss) >> 20\n available_mb = available_bytes >> 20\n model_size_memory_ratio = estimated_model_size_mb / available_mb\n\n early_stop = False\n if model_size_memory_ratio > 1.0:\n logger.warning(f'Warning: Large model size may cause OOM error if training continues')\n early_stop = True\n\n if available_mb < 512: # Less than 500 MB\n logger.warning(f'Warning: Low available memory may cause OOM error if training continues')\n early_stop = True\n\n if early_stop:\n logger.warning('Warning: Early stopped model prior to optimal result to avoid OOM error. '\n 'Please increase available memory to avoid subpar model quality.')\n logger.warning(f'Available Memory: {available_mb} MB, Estimated Model size: {estimated_model_size_mb} MB')\n return True\n elif self.verbose or (model_size_memory_ratio > 0.25):\n logging.debug(f'Available Memory: {available_mb} MB, Estimated Model size: {estimated_model_size_mb} MB')\n return False", "def _checkAvailableMemory():\n #execute free -m to get output in MB\n logging.debug(\"checking total memory\")\n cmd = [\n basedefs.EXEC_FREE, \"-m\"\n ]\n output, rc = utils.execCmd(cmdList=cmd, failOnError=True, msg=output_messages.ERR_EXP_FREE_MEM)\n\n #itterate over output and look for the line: \"Mem: 1 something\"\n #and extract 1 from it (1 is an example to the free memory)\n availableMemory = 0\n for line in output.split(\"\\n\"):\n result = re.match(\"Mem:\\s+(\\d+)\\s+.+\", line)\n if result:\n logging.debug(\"Found a match, amount of memory: %s\" % result.group(1))\n availableMemory = result.group(1)\n\n #compare found memory to restrictions\n availableMemory = int(availableMemory)\n #multiplying CONST_MIN_MEMORY by 0.95 to have tolerance of 5%\n if availableMemory < (basedefs.CONST_MIN_MEMORY_MB * 0.95):\n logging.error(\"Availble memory (%s) is lower then the minimum requirments (%s)\" % (availableMemory, basedefs.CONST_MIN_MEMORY_MB))\n raise Exception(output_messages.ERR_EXP_NOT_EMOUGH_MEMORY)\n\n if availableMemory < basedefs.CONST_WARN_MEMORY_MB:\n logging.warn(\"There is less then %s available memory \" % basedefs.CONST_WARN_MEMORY_MB)\n controller.MESSAGES.append(output_messages.WARN_LOW_MEMORY)", "def check_available_memory(self,unit='B'):\n free = psutil.virtual_memory().available\n\n if unit == 'MB':\n\n return free/10**6\n\n elif unit == 'GB':\n\n return free/10**9\n\n else:\n\n return free", "def device_out_of_memory(self) -> bool:\n return pulumi.get(self, \"device_out_of_memory\")", "def test_mem_available():\n result = _run_metric('mem_available')\n assert result.exit_code == 0", "def stop_loading(self):\n return psutil.virtual_memory()[2] >= self.max_memory", "def stop_loading(self):\n return psutil.virtual_memory()[2] >= self.max_memory", "def precheck(self):\n if self.__memory_size is None:\n self.logger.exception(\"[Memory] Please set memory size.\")\n raise ArgsNotCorrect(\"Please set memory size.\")", "def device_out_of_memory(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"device_out_of_memory\")", "def is_enough_space(self) -> bool:\n return self._free_space() > self.minimum_disk", 
"def check_mem_usage():\n mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\n return mem", "def mem_avail():\n return psutil.virtual_memory().available", "def is_out_of_memory(self):\n\n return self._state == \"OUT_OF_MEMORY\"", "def testExcessiveRamUsage(self):\n c = Simulation()\n c.set_simulation_parameters(\n seed=1,\n task=36,\n output_directory=\"output\",\n min_speciation_rate=0.5,\n sigma=2,\n tau=2,\n deme=100000000000,\n sample_size=0.1,\n max_time=10,\n )\n c.set_map_files(sample_file=\"sample/large_mask.tif\", fine_file=\"sample/large_fine.tif\")\n with self.assertRaises(MemoryError):\n c.optimise_ram(ram_limit=16)", "def hasmem(state, mem):\n if mem <= state[HEAD][MEM]:\n return True\n else:\n state[HEAD][STATUS] = OOM\n return False", "def _handle_not_enough_memory(self, calculation):\n\n if not self.ctx.can_be_optimised:\n self.ctx.restart_calc = calculation\n self.ctx.is_finished = True\n self.report('I am not allowed to optimize your settings. Consider providing at least'\n 'num_machines and num_mpiprocs_per_machine')\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.ERROR_MEMORY_ISSUE_NO_SOLUTION)\n\n self.ctx.restart_calc = None\n self.ctx.is_finished = False\n self.report('Calculation failed due to lack of memory, I resubmit it with twice larger'\n ' amount of computational nodes and smaller MPI/OMP ratio')\n\n # increase number of nodes\n propose_nodes = self.ctx.num_machines * 2\n if propose_nodes > self.ctx.max_queue_nodes:\n propose_nodes = self.ctx.max_queue_nodes\n self.ctx.num_machines = propose_nodes\n\n self.ctx.suggest_mpi_omp_ratio = self.ctx.suggest_mpi_omp_ratio / 2\n\n status = self.check_kpts()\n if status is not None:\n self.ctx.is_finished = True\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.ERROR_NOT_OPTIMAL_RESOURCES)\n\n if 'settings' not in self.ctx.inputs:\n settings = {}\n else:\n settings = self.ctx.inputs.settings.get_dict()\n settings.setdefault('remove_from_remotecopy_list', [])\n if 'mixing_history*' not in settings['remove_from_remotecopy_list']:\n settings['remove_from_remotecopy_list'].append('mixing_history*')\n self.ctx.inputs.settings = orm.Dict(dict=settings)\n\n #check if the cdn.hdf can be reused\n #Out of memory can also occur after a couple of iterations if the mixing_history gets too large\n remote = calculation.base.links.get_outgoing().get_node_by_label('remote_folder')\n if _is_remote_reusable(self.ctx.inputs, calculation):\n if 'fleurinp' in self.ctx.inputs:\n del self.ctx.inputs.fleurinp\n self.ctx.inputs.parent_folder = remote\n\n return ProcessHandlerReport(True)", "def storage_available(self):\n logger.debug('Function storage_available start')\n \n # 2.9 GB\n max_size = 2.9*10**9\n \n if self.total_image_data_size >= max_size:\n logger.info(\"Storage not available\")\n return False\n else:\n logger.info(\"Storage available\")\n return True\n\n logger.debug('Function storage_available end')", "def can_build(self, game_map) -> bool:\n if self.is_cart():\n return False\n cell = game_map.get_cell_by_pos(self.pos)\n if not cell.has_resource() and cell.citytile is None and self.can_act() and self.has_enough_resources_to_build:\n return True\n return False", "def ram_prop_condition(prop=0.25):\n mem_info = get_mem_info()\n total_mem = float(mem_info['MemTotal'].value) / 10**6\n min_gb = prop * total_mem\n return ram_condition(min_gb=min_gb)", "def is_busy(self):\n threads = len(self.executor._threads)\n if threads == 0:\n return False\n\n capacity = 
self.executor._work_queue.qsize() / float(threads)\n if capacity > 2:\n return True\n elif capacity < 1:\n return False\n else:\n return capacity > (random.random() + 1)", "def check_free_space(environment, target_xy, fovea):\n temp_image = check_target_position(environment, target_xy, fovea)\n if np.array_equal(temp_image, np.zeros(temp_image.shape)):\n return True\n else:\n return False", "def check(self, runtime):\n return True", "def test_mem_available_percent():\n result = _run_metric('mem_available_percent')\n assert result.exit_code == 0", "def is_available_while_running(cls) -> bool:\n\n return True", "def __check_memory_limit(self, efile_path):\n try:\n log.debug('Checking %s for exceeded memory message from SLURM', efile_path)\n with open(efile_path) as f:\n if os.path.getsize(efile_path) > 2048:\n f.seek(-2048, os.SEEK_END)\n f.readline()\n for line in f.readlines():\n stripped_line = line.strip()\n if stripped_line == SLURM_MEMORY_LIMIT_EXCEEDED_MSG:\n return OUT_OF_MEMORY_MSG\n elif any(_ in stripped_line for _ in SLURM_MEMORY_LIMIT_EXCEEDED_PARTIAL_WARNINGS):\n return PROBABLY_OUT_OF_MEMORY_MSG\n except Exception:\n log.exception('Error reading end of %s:', efile_path)\n\n return False", "def allocate(self) -> bool:\n if hasattr(self.at_options, 'allocate'):\n return self.at_options.allocate == 1\n return False", "def check_mem(self, values):\n try:\n virt_mem = psutil.virtual_memory()\n values[keys.KEY_VIRTUAL_MEM_TOTAL] = virt_mem.total\n values[keys.KEY_VIRTUAL_MEM_PERCENT] = virt_mem.percent\n except:\n logging.error(\"Error collecting memory stats.\")", "def checkmem(self,file_,line_): # 3\n res = self.__obj.checkmemtask(file_,line_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def test_disk_space_required_zero_if_no_vm(self):\n self.assertEqual(self.command.working_dir_disk_space_required(), 0)" ]
[ "0.7269116", "0.68143415", "0.66817755", "0.64842796", "0.6442835", "0.6359018", "0.6243431", "0.6243431", "0.62134373", "0.61970484", "0.58983666", "0.58968914", "0.5888447", "0.5873215", "0.58724576", "0.57852805", "0.5777731", "0.57357985", "0.57344395", "0.5731109", "0.5679409", "0.56775975", "0.56675816", "0.5660551", "0.56556684", "0.564799", "0.5594359", "0.5590541", "0.5519973", "0.55175686" ]
0.6852558
1
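A small usage sketch for the ram_condition guard above; the caller name and the job callable are hypothetical, and the 3 GB threshold simply mirrors the row's default argument:

def run_if_enough_ram(job, min_gb=3):
    # ram_condition() returns True when the machine can NOT run,
    # so skip the work in that case.
    if ram_condition(min_gb=min_gb):
        print('Less than {} GB of RAM available, skipping job.'.format(min_gb))
        return None
    return job()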
The camera that took the image
def camera(self): return self.__camera
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def camera(self):\n return self._camera", "def camera(self):\n return self._camera", "def query_camera(self):\n ok, orig_pic = self.vs.read() # Read video stream\n if ok: # If no errors\n orig_pic = imutils.rotate(orig_pic, angle=self.camera_rot)\n curr_pic = imutils.resize(orig_pic, width=self.image_width)\n return curr_pic, orig_pic\n else:\n return None, None", "def current_camera(self):\n n = ct.c_long() # current camera handler\n self.lib.GetCurrentCamera(ct.pointer(n))\n return n.value", "def get_image(self):\n return self.camera.getImage()", "def camera_id(self):\n return self._camera_id", "def camera_id(self):\n return self._camera_id", "def camera_image(self):\n if not self.ezvizService.switchState:\n return \"\"\n\n now = time.time()\n if now < self._last_snapshot_time + self._interval_snapshots:\n return self._last_image\n\n result = self.ezvizService.post('/lapp/device/capture', data={'deviceSerial':self.deviceSerial,'channelNo':1})\n if (result['code']!='200'):\n _LOGGER.error(\"EZVIZ capture image fail:%s\", result)\n return self._last_image\n\n image_path = result['data']['picUrl']\n try:\n response = requests.get(image_path)\n except requests.exceptions.RequestException as error:\n _LOGGER.error(\"EZVIZ getting camera image: %s\", error)\n return self._last_image\n\n self._last_snapshot_time = now\n self._last_image = response.content\n return self._last_image", "def get_image():\n\n # Access the global variable and activate the saving for the last camera's\n # frame\n global _save_image\n _save_image = True", "def snapshot(self):\n return self.camera.snapshot(0)", "def read_camera(self):\n _, frame = self.camera.read()\n return self.mirror(frame)", "def get_image(self):\n self.flush_buffer()\n _, frame = self.cam.read()\n shift_frame = self.perspective_shift(frame)\n #shift_frame = None\n return frame, shift_frame", "def camera_entity(self):\n return self._camera_entity", "def camera_image(self):\n return asyncio.run_coroutine_threadsafe(\n self.async_camera_image(), self.hass.loop\n ).result()", "def camera_entity(self):\n return self._camera_entity_id", "def get_camera_metadata(self):\n return self.camera.getHeight(), self.camera.getWidth(), 4 # channels", "def camera_image(self):\n now = utcnow()\n if self._ready_for_snapshot(now) or True:\n image = self._device.camera_get_image(self._uuid, now)\n\n self._next_snapshot_at = now + self._time_between_snapshots\n self._last_image = image\n\n return self._last_image", "def captureimage(self):\n if not self.total_time:\n return self.frames[-1]\n return None", "def grab_image(self):\n _, camera_image = self.camera.read()\n with self.lock:\n self.image = camera_image", "def bspb_getCurrentCam():\n curPanel = pm.windows.getPanel(wf=True)\n if curPanel.startswith('modelPanel'):\n currentCamera = str(pm.windows.modelEditor(curPanel, q=True, camera=True))\n if currentCamera == 'shot_cam' or currentCamera == 'shot_camShape':\n return 'shot_cam'\n else:\n return 'Valid Camera is not selected.'\n else:\n return 'Please Select shot_cam viewport.'", "def _get_camera(self):\n rect = (self._dim[0], self._dim[2], self._dim[1] - self._dim[0],\n self._dim[3] - self._dim[2])\n flip = (False, type(self).__name__ == 'ImageObj', False)\n return scene.cameras.PanZoomCamera(rect=rect, flip=flip)", "def snapFrame(camera):\n return camera.read()[1]", "def get_camera_feed(self):\r\n # get the frame..from cam feed\r\n read_status, self.frame = self.capture.read()\r\n return self.frame", "def get_frame(self):\n BaseCamera.last_access = 
time.time()\n\n # wait for a signal from the camera thread\n BaseCamera.event.wait()\n BaseCamera.event.clear()\n\n return BaseCamera.frame", "def camera(self):\n self.spectrum = self.spectrum", "def capture_image():\n global img_tk\n r, img_cam = cam.read()\n img_pil = Image.fromarray(cv2.cvtColor(img_cam, cv2.COLOR_BGR2RGB))\n img_tk = ImageTk.PhotoImage(img_pil)\n tk_cam.create_image(0, 0, image=img_tk, anchor='nw')\n return img_pil", "def getFrame(self):\n s, image = self.capture.read()\n return image", "def getCameraMatrix(self): # real signature unknown; restored from __doc__\n pass", "def model(self) -> CameraModel:\n pass", "def __get_img(self):\n # Read camera image\n while True:\n # Wait for prediction\n if not self.__predict_start:\n continue\n\n # Get current frame and\n # check for success\n success, self.__img = self.__cap.read()\n if not success:\n continue\n\n self.__img = cv2.resize(self.__img, (self.__size[0], self.__size[1]))" ]
[ "0.8029401", "0.8029401", "0.7391439", "0.7336508", "0.7180209", "0.711195", "0.711195", "0.71046424", "0.70813036", "0.70528156", "0.69937193", "0.69860065", "0.69809467", "0.68731916", "0.6841381", "0.6825675", "0.68190277", "0.680215", "0.6788457", "0.6687622", "0.66858554", "0.6659736", "0.66563463", "0.6616539", "0.6616073", "0.6568658", "0.65509355", "0.6548425", "0.64725703", "0.6465223" ]
0.81577206
0
r""" Property for the exterior orientation parameters
def exteriorOrientationParameters(self): return self.__exteriorOrientationParameters
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def orient(self):\n return self.__ph.get('orient', PH_ORIENT_HORZ)", "def get_orientation(self):\r\n return self.__orientation", "def Orientation(self, *args):\n return _Adaptor3d.Adaptor3d_TopolTool_Orientation(self, *args)", "def orientation(self):\n return self._orientation", "def orientation(self):\n return self._orientation", "def getOrientation(self):\r\n return self.orientation", "def define_orientation_matrix(self):\n from lmfit import Parameters\n p = Parameters()\n for i in range(3):\n for j in range(3):\n p.add('U%d%d' % (i, j), self.Umat[i, j])\n self.init_p = self.Umat\n return p", "def give_orientation(pose, orr_array):\n pose.orientation.x = orr_array[0]\n pose.orientation.y = orr_array[1]\n pose.orientation.z = orr_array[2]\n pose.orientation.w = orr_array[3]", "def orientation(self) -> str:\n return self._widget._mgui_get_orientation()", "def get_orientation(self):\n return self._orientation", "def galaxy1_orbital_orientation(self):\n return self._galaxy1_orbital_orientation", "def get_orientation(self):\n # Only work with rotation around x by now\n n0 = DEFAULT_N0\n n1 = DEFAULT_N1\n if self.rotation[2] != 0.0:\n n0 = self.rotate_z(n0)\n n1 = self.rotate_z(n1)\n return n0, n1, DEFAULT_N2", "def setup_orientation_annotation(self) :\n \n # Anatomical directions in LPS convention, numpy order\n directions_anatomical = {\n \"L\" : (0,0,+1),\n \"R\" : (0,0,-1),\n \"P\" : (0,+1,0),\n \"A\" : (0,-1,0),\n \"I\" : (-1,0,0),\n \"S\" : (+1,0,0),\n }\n \n # Index directions, numpy order\n directions_index = {\n \"+x\" : (0,0,+1),\n \"-x\" : (0,0,-1),\n \"+y\" : (0,+1,0),\n \"-y\" : (0,-1,0),\n \"+z\" : (-1,0,0),\n \"-z\" : (+1,0,0),\n }\n \n directions = (directions_anatomical \n if self.display_coordinates in [\"physical\", \"nearest_axis_aligned\"]\n else directions_index)\n \n # Window locations\n locations = {\n \"up\" : (1,0),\n \"down\" : (-1,0),\n \"left\" : (0,-1),\n \"right\" : (0,1)\n }\n \n for location, p in locations.items() :\n matrix = self._3d_world_to_slice\n direction = numpy.dot(self._3d_slice_to_world, numpy.hstack((0, p)))\n \n # Find closest in-slice direction based on dot product\n closest = None\n max_distance = -1\n for name, d in directions.items() :\n distance = numpy.dot(d, direction)\n if distance > max_distance :\n max_distance = distance\n closest = name\n \n # Set text\n index = self._orientation_annotation_index[location]\n self._orientation_annotation.SetText(index, closest)", "def __init__(self):\n self.degrees = 60.0\n self.aspect_ratio = 1.0\n self.front_pane = 0.1\n self.back_pane = 100.0", "def base_orientation_quaternion(self):\n raise NotImplementedError('Not yet implemented!')", "def galaxy2_orbital_orientation(self):\n return self._galaxy2_orbital_orientation", "def interior_angle(self):\n if self.interior_angle_l is not None:\n return self.interior_angle_l\n else:\n self.interior_angle_l = ((self.vert_count - 2)*180)/math.pi\n return self.interior_angle_l", "def GetOrientation(self):\r\n\r\n return self.orientation", "def _get_orientations(self):\n for atom in self.invarioms:\n atom.get_orientation()", "def set_MRI_orientation(self):\n\n if self.has_axes(MRI3Daxes):\n orientation = MRI3Daxes[:]\n if self.has_axis('time'):\n orientation += ['time']\n if self.has_axis('iteration'):\n orientation += ['iteration']\n if self.has_axis('condition'):\n orientation += ['condition']\n\n orientation += sorted(set(self.axes_names).difference(orientation))\n\n self.set_orientation(orientation)", "def GetOrientation(self):\n return self._orient", "def 
orientation(p, q, r):\n val = (q.y - p.y) * (r.x - q.x) - (q.x - p.x) * (r.y - q.y)\n if val == 0:\n return 0\n elif val > 0:\n return 1\n else:\n return 2", "def orientation(self, p, q, r):\n\n val = (float(q.y - p.y) * (r.x - q.x)) - (float(q.x - p.x) * (r.y - q.y))\n if (val > 0):\n\n # Clockwise orientation\n return 1\n elif (val < 0):\n\n # Counterclockwise orientation\n return 2\n else:\n\n # Colinear orientation\n return 0", "def set_orientation(self, val):\n self._orientation = val", "def GetToolOrientation(self):\r\n\r\n return self._tool_orientation", "def __init__(self):\n self.rot_axis = 1", "def potential_parameters(cls):\n return [\"k\", \"angle\"]", "def yy(self):\n return self.exterior[:, 1]", "def __init__(self, parent):\n super(Demo5, self).__init__(parent)\n self.angle = 0.0\n self.replication = 1.0\n self.offset = 0.0\n self.deltaRep = 1\n self.revolution = 0\n self.stepsPer90 = 180\n self.stepsLeft = self.stepsPer90\n self.deltaAng = 90.0\n self.deltaOff = 0.15\n self.spin = True\n self.x2yAspect = 1.0\n self.texture = None", "def screen_orientation(self):\n # type: () -> string_types\n return self._screen_orientation" ]
[ "0.61678284", "0.61665916", "0.61640745", "0.60078", "0.60078", "0.5891339", "0.5885623", "0.58542037", "0.5838997", "0.5836636", "0.5822891", "0.58147955", "0.5795377", "0.57695895", "0.57024115", "0.56947505", "0.56763643", "0.5612986", "0.5593346", "0.5587369", "0.5578711", "0.5572987", "0.5562875", "0.55135816", "0.5508962", "0.5501228", "0.5457921", "0.5444679", "0.5438654", "0.5415286" ]
0.80023855
0
The rotation matrix of the image. Relates to the exterior orientation parameters.
def rotationMatrix(self):
    R = Compute3DRotationMatrix(self.exteriorOrientationParameters[3],
                                self.exteriorOrientationParameters[4],
                                self.exteriorOrientationParameters[5])
    return R
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_rotation_angle(self, image):\n \n # TODO: Make real functionality\n return 0", "def rotation_matrix(self):\n return self.affine_matrix[0:3][:, 0:3]", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def matrix(self):\n return self._rotation", "def rotation(self):\n return self.transform.getRotation() + [0]", "def rotation_matrix(self):\n self._normalise()\n product_matrix = np.dot(self._q_matrix(), self._q_bar_matrix().conj().transpose())\n return product_matrix[1:][:,1:]", "def transformation_matrix(self):\n t = np.array([[0.0], [0.0], [0.0]])\n Rt = np.hstack([self.rotation_matrix, t])\n return np.vstack([Rt, np.array([0.0, 0.0, 0.0, 1.0])])", "def rotated_image(image):\n orientation = parameter('Orientation',90) # in degrees counter-clockwise\n if orientation == None: orienation = 0\n return image.rotate(orientation)", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n\n return self._rotation", "def rotation_mat(self) -> np.ndarray:\n rot = np.zeros((3, 3))\n\n txx = 2 * self.x * self.x\n tyy = 2 * self.y * self.y\n tzz = 2 * self.z * self.z\n twx = 2 * self.w * self.x\n twy = 2 * self.w * self.y\n twz = 2 * self.w * self.z\n txy = 2 * self.x * self.y\n txz = 2 * self.x * self.z\n tyz = 2 * self.y * self.z\n\n rot[0, 0] = 1. - tyy - tzz\n rot[0, 1] = txy - twz\n rot[0, 2] = txz + twy\n rot[1, 0] = txy + twz\n rot[1, 1] = 1. - txx - tzz\n rot[1, 2] = tyz - twx\n rot[2, 0] = txz - twy\n rot[2, 1] = tyz + twx\n rot[2, 2] = 1. - txx - tyy\n\n return rot", "def rotation_angle(self):\n return self.container['rotation_angle']", "def relativeRotation(self):\n return self.rotation()", "def rotate_image(image):\n return tf.image.rot90(image)", "def rotation(self) -> CameraRotationType:\n return self._rotation", "def _get_rotation_matrix(transform):\n # caution: UE4 is using left-hand ortation order\n roll = np.deg2rad(-transform.rotation.roll)\n pitch = np.deg2rad(-transform.rotation.pitch)\n yaw = np.deg2rad(transform.rotation.yaw)\n sr, cr = np.sin(roll), np.cos(roll)\n sp, cp = np.sin(pitch), np.cos(pitch)\n sy, cy = np.sin(yaw), np.cos(yaw)\n rotation_matrix = np.array([[cy * cp, -sy * sr + cy * sp * sr, cy * sp * cr + sy * sr],\n [sy * cp, cy * sp * sr + cy * sr, -cy * sr + sy * sp * cr],\n [-sp, cp * sr, cp * cr]])\n return rotation_matrix", "def rotate(mat,angle):\n return np.dot(Mueller.rotator(angle), np.dot(mat, Mueller.rotator(-angle)))", "def camera_rotation(self) -> CameraRotationType:\n return self._rotation", "def rotation(self) -> float:\n xs, ys = self.xcoords.data, self.ycoords.data\n rot = 0\n if xs.ndim == 2:\n ddx1 = xs[0, -1] - xs[0, 0]\n ddy1 = ys[0, -1] - ys[0, 0]\n if not np.isclose(ddx1, 0):\n rot = math.degrees(math.atan(ddy1 / ddx1))\n else:\n rot = -90\n if ddx1 < 0:\n rot = 180 + rot\n elif ddy1 < 0:\n rot = 360 + rot\n return rot", "def determine_rotation_matrix(self, origin, angle, scale):\n # scaling will be ignored at this step\n rotation_matrix = cv2.getRotationMatrix2D(origin, angle * 180 / np.pi, scale)\n return rotation_matrix" ]
[ "0.7759922", "0.77279866", "0.7530842", "0.7502491", "0.7206852", "0.71904266", "0.7162689", "0.71563435", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7005358", "0.6981667", "0.6911321", "0.68793875", "0.68270576", "0.679283", "0.67912734", "0.6790262", "0.67638195", "0.67603487", "0.67526007" ]
0.77768993
0
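The rotationMatrix property above indexes elements 3-5 of the exterior orientation vector, which suggests a layout of [X0, Y0, Z0, omega, phi, kappa]; that layout, and the omega-phi-kappa convention below, are assumptions since Compute3DRotationMatrix is not defined in this row. A minimal sketch of one common photogrammetric convention:

import numpy as np

def compute_3d_rotation_matrix(omega, phi, kappa):
    # Sketch only: R = Rx(omega) @ Ry(phi) @ Rz(kappa); the actual
    # Compute3DRotationMatrix may use a different sign or order convention.
    rx = np.array([[1, 0, 0],
                   [0, np.cos(omega), -np.sin(omega)],
                   [0, np.sin(omega), np.cos(omega)]])
    ry = np.array([[np.cos(phi), 0, np.sin(phi)],
                   [0, 1, 0],
                   [-np.sin(phi), 0, np.cos(phi)]])
    rz = np.array([[np.cos(kappa), -np.sin(kappa), 0],
                   [np.sin(kappa), np.cos(kappa), 0],
                   [0, 0, 1]])
    return rx @ ry @ rz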
r""" Compute inner orientation parameters
def ComputeInnerOrientation(self, imagePoints):
    # implementing observation vectors
    imagePoints = imagePoints.reshape(np.size(imagePoints), 1)

    fMarks = self.camera.fiducialMarks.reshape(np.size(self.camera.fiducialMarks), 1)

    n = int(len(imagePoints))  # number of observations
    u = 6  # 6 orientation parameters

    A = np.zeros((n, u))  # A matrix (n,u)

    j = 0
    for i in range(len(imagePoints)):
        if i % 2 == 0:
            A[i, 0] = 1
            A[i, 1] = 0
            A[i, 2] = fMarks[j]
            A[i, 3] = fMarks[j + 1]
            A[i, 4] = 0
            A[i, 5] = 0
        else:
            A[i, 0] = 0
            A[i, 1] = 1
            A[i, 2] = 0
            A[i, 3] = 0
            A[i, 4] = fMarks[j]
            A[i, 5] = fMarks[j + 1]
            j += 2

    X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), imagePoints))
    v = np.dot(A, X) - imagePoints

    adjustment_results = {"params": X, "residuals": v, "N": np.dot(np.transpose(A), A)}

    self.__innerOrientationParameters = X  # updating the inner orientation params

    return adjustment_results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ComputeInverseInnerOrientation(self):\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n mat = np.array([[a1[0], a2[0]], [b1[0], b2[0]]])\n mat = la.inv(mat)\n\n return np.array([a0[0], b0[0], mat[0, 0], mat[0, 1], mat[1, 0], mat[1, 1]]).T", "def ComputeGeometricParameters(self):\n # extracting inner orientation params\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n # computing algebric params\n tx = a0;\n ty = b0\n theta = np.arctan(b1 / b2)\n gamma = np.arctan((a1 * np.sin(theta) + a2 * np.cos(theta)) / (b1 * np.sin(theta) + b2 * np.cos(theta)))\n sx = a1 * np.cos(theta) - a2 * np.sin(theta)\n sy = (a1 * np.sin(theta) + a2 * np.cos(theta)) / np.sin(gamma)\n\n return {\"translationX\": tx, \"translationY\": ty, \"rotationAngle\": np.rad2deg(theta), \"scaleFactorX\": sx,\n \"scaleFactorY\": sy, \"shearAngle\": np.rad2deg(gamma)}", "def ellipse_orientation(S):\n return 1/2 * np.arctan2(S[..., 2], S[..., 1])", "def orientation(p, q, r):\n val = (q.y - p.y) * (r.x - q.x) - (q.x - p.x) * (r.y - q.y)\n if val == 0:\n return 0\n elif val > 0:\n return 1\n else:\n return 2", "def azizen(self):\n # x0,y0 array pixel coordinates relative to cx,cy\n# ndy0,ndx0=img.shape\n ndy0=self.ndy0\n ndx0=self.ndx0\n x0,y0=np.meshgrid(np.linspace(0,ndx0-1,ndx0)-self.cx,np.linspace(0,ndy0-1,ndy0)-self.cy)\n r0=np.sqrt(x0**2+y0**2)/self.pr0 # fractional radial distance from 0,0\n# self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]\n # why not model the zenith angle dependence with polynomial directly\n # rather than linear interpolation between roots.\n roots=np.zeros(51)\n rr=np.arange(51)/100.0\n for i,ref in enumerate(rr):\n roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])\n theta0 = np.interp(r0/2,rr,roots)\n \n phi0 = np.arctan2(x0,y0) - self.rot ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise, NOTE: pysolar use sub-standard definition\n phi0 = phi0%(2*np.pi)\n\n #####correction for the tilt of the camera\n k=np.array((np.sin(self.azm),np.cos(self.azm),0))\n a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]); \n a = np.transpose(a,[1,2,0])\n b=np.cos(self.beta)*a + np.sin(self.beta)*np.cross(k,a,axisb=2) \\\n + np.reshape(np.outer(np.dot(a,k),k),(self.ndy0,self.ndx0,3))*(1-np.cos(self.beta))\n theta0=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])\n phi0=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)\n# max_theta *= deg2rad \n# valid0 = (theta0<max_theta) & (theta0>0); \n# theta0[valid0]=np.nan;\n self.theta0,self.phi0=theta0,phi0", "def orientation(cnt):\n\t(x,y), (MA, ma), angle = cv2.fitEllipse(cnt)\n\treturn angle", "def _get_orientations(self):\n for atom in self.invarioms:\n atom.get_orientation()", "def exteriorOrientationParameters(self):\n return self.__exteriorOrientationParameters", "def orientation(xp, yp, xq, yq, xr, yr):\n cross = (xq-xp)*(yr-yp) - (xr-xp)*(yq-yp)\n dot = (xq-xp)*(xr-xp) + (yr-yp)*(yq-yp)\n if cross < 0:\n return -1\n elif cross > 0:\n return 1\n elif dot > 0:\n return 0\n else:\n return math.pi", "def calc_main_axis(self):\n #Clarify why the above step has been 
done\n c0, c1, c2 = self.calc_geom_center ()\n M = numpy.zeros ((3, 3), dtype=float)\n M = [[0] * 3, [0] * 3, [0] * 3]\n for x in self.atomcoords:\n xi = x[0] - c0\n yi = x[1] - c1\n zi = x[2] - c2\n M[0][0] = M[0][0] + xi * xi\n M[0][1] = M[0][1] + xi * yi\n M[0][2] = M[0][2] + xi * zi\n M[1][1] = M[1][1] + yi * yi\n M[1][2] = M[1][2] + yi * zi\n M[2][2] = M[2][2] + zi * zi\n M[1][0] = M[0][1]\n M[2][0] = M[0][2]\n M[2][1] = M[1][2]\n M = numpy.array (M)\n d = sum (numpy.diag (M))\n M = -M\n M[0, 0] = M[0, 0] + d\n M[1, 1] = M[1, 1] + d\n M[2, 2] = M[2, 2] + d\n\n eigenVals, eigenVecs = numpy.linalg.eig (M)\n eigenVecs = eigenVecs.transpose ()\n return eigenVecs", "def get_orientation(self):\n # Only work with rotation around x by now\n n0 = DEFAULT_N0\n n1 = DEFAULT_N1\n if self.rotation[2] != 0.0:\n n0 = self.rotate_z(n0)\n n1 = self.rotate_z(n1)\n return n0, n1, DEFAULT_N2", "def define_orientation_matrix(self):\n from lmfit import Parameters\n p = Parameters()\n for i in range(3):\n for j in range(3):\n p.add('U%d%d' % (i, j), self.Umat[i, j])\n self.init_p = self.Umat\n return p", "def _calculate_parameters(self, thickness: int = 10):\n\n self.thickness = thickness\n\n # set orientation dependent parameters: (different for x, y, z-PML)\n # NOTE: these methods are implemented by the subclasses of PML.\n self._set_locations()\n self._set_shape()\n self._set_sigmaE()\n self._set_sigmaH()\n\n # set the other parameters\n Nx, Ny, Nz = self.shape # is defined by _set_shape()\n self.phi_E = bd.zeros((Nx, Ny, Nz, 3))\n self.phi_H = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ex = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ey = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ez = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hx = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hy = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hz = bd.zeros((Nx, Ny, Nz, 3))\n\n self.bE = bd.exp(-(self.sigmaE / self.k + self.a) * self.grid.courant_number)\n self.cE = (\n (self.bE - 1.0)\n * self.sigmaE # is defined by _set_sigmaE()\n / (self.sigmaE * self.k + self.a * self.k ** 2)\n )\n\n self.bH = bd.exp(-(self.sigmaH / self.k + self.a) * self.grid.courant_number)\n self.cH = (\n (self.bH - 1.0)\n * self.sigmaH # is defined by _set_sigmaH()\n / (self.sigmaH * self.k + self.a * self.k ** 2)\n )", "def P(self):\n self.eigenmatrix()", "def _save_parameters(self):\n\n # eigenvectors are the coefficients of an ellipse in general form\n # a*x^2 + 2*b*x*y + c*y^2 + 2*d*x + 2*f*y + g = 0 [eqn. 15) from (**) or (***)\n a = self.coef[0, 0]\n b = self.coef[1, 0]/2.\n c = self.coef[2, 0]\n d = self.coef[3, 0]/2.\n f = self.coef[4, 0]/2.\n g = self.coef[5, 0]\n\n # finding center of ellipse [eqn.19 and 20] from (**)\n x0 = (c*d-b*f)/(b**2.-a*c)\n y0 = (a*f-b*d)/(b**2.-a*c)\n\n # Find the semi-axes lengths [eqn. 21 and 22] from (**)\n numerator = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)\n denominator1 = (b*b-a*c) * \\\n ((c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n denominator2 = (b*b-a*c) * \\\n ((a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n width = np.sqrt(numerator/denominator1)\n height = np.sqrt(numerator/denominator2)\n\n # angle of counterclockwise rotation of major-axis of ellipse to x-axis [eqn. 23] from (**)\n # or [eqn. 
26] from (***).\n phi = .5*np.arctan((2.*b)/(a-c))\n\n self._center = [x0, y0]\n self._width = width\n self._height = height\n self._phi = phi", "def orientation(self, p, q, r):\n\n val = (float(q.y - p.y) * (r.x - q.x)) - (float(q.x - p.x) * (r.y - q.y))\n if (val > 0):\n\n # Clockwise orientation\n return 1\n elif (val < 0):\n\n # Counterclockwise orientation\n return 2\n else:\n\n # Colinear orientation\n return 0", "def params_2D_3D(alpha, sx, sy, mirror):\n\tphi = 0\n\tpsi = 0\n\ttheta = 0\n\talphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)\n\tif mirror > 0:\n\t\tphi = (540.0 + phi)%360.0\n\t\ttheta = 180.0 - theta\n\t\tpsi = (540.0 - psi + alphan)%360.0\n\telse:\n\t\tpsi = (psi + alphan)%360.0\n\treturn phi, theta, psi, s2x, s2y", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def compute_orientation(x,y,lx,ly,nfil):\n # number of molecules\n natoms = len(x)\n nmol = natoms/nfil\n # allocate aray for results\n phi = np.zeros((natoms), dtype = np.float64)\n tx = np.zeros((natoms), dtype = np.float64)\n ty = np.zeros((natoms), dtype = np.float64)\n # loop over all polymers\n k = 0\n for i in range(nmol):\n for j in range(nfil):\n if j == 0:\n x1 = x[k]\n y1 = y[k]\n x2 = x[k+1]\n y2 = y[k+1]\n elif j == nfil-1:\n x1 = x[k-1]\n y1 = y[k-1]\n x2 = x[k]\n y2 = y[k]\n else:\n x1 = x[k-1]\n y1 = y[k-1]\n x2 = x[k+1]\n y2 = y[k+1]\n # compute nearest neighbor\n dx = neigh_min(x2-x1,lx)\n dy = neigh_min(y2-y1,ly)\n # compute angle using atan2\n pi = math.atan2(dy,dx)\n phi[k] = pi\n tx[k] = dx / np.sqrt(dx**2 + dy**2)\n ty[k] = dy / np.sqrt(dx**2 + dy**2)\n # increment k\n k = k + 1\n return phi, tx, ty", "def getAxisAndAngleDegres(self):\n theta = numpy.arccos(self.real)\n angle = 360 * theta / numpy.pi\n xyz = self.pure / numpy.sin(theta)\n return xyz, angle", "def orientation(a:tuple, b:tuple, c:tuple)->int:\n d = direction(a, b, c)\n if d == 0:\n return 0\n elif d > 0:\n return 1\n else:\n return -1", "def outer_rad(self):\n return self._outer_rad", "def align(self) -> np.ndarray:\n vel = self.state[:, :, Boids.Attr.VEL]\n vel_norm = np.linalg.norm(vel, axis=0)\n orientation = vel / (vel_norm + EPSILON)\n mut_influence = self._perceive(self.p_range)\n desired_orientation = np.dot(orientation, mut_influence)\n desired_orientation = np.multiply(desired_orientation, \n vel_norm + EPSILON)\n return desired_orientation - orientation", "def orientation(p0, p1, p2):\n\n angle = (p1[1] - p0[1])*(p2[0] - p1[0]) - (p2[1] - p1[1])*(p1[0] - p0[0])\n if angle == 0.0:\n return 0\n elif angle < 0.0:\n return -1\n elif angle > 0.0:\n return 1", "def _sector_orientation(self, vertices):\n if not vertices[0] == vertices[-1]:\n vertices.append(vertices[0])\n xy = np.transpose(np.array(vertices))\n x, y = xy[0], xy[1]\n return np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)) > 0, vertices", "def orient_2d(p,q,r):\n return (q[0]-p[0])*(r[1]-p[1]) - (r[0]-p[0])*(q[1]-p[1])", "def init_axis(self):\n # Shorthand:\n nphi = self.nphi\n nfp = self.nfp\n\n phi = np.linspace(0, 2 * np.pi / nfp, nphi, endpoint=False)\n d_phi = phi[1] - phi[0]\n R0 = np.zeros(nphi)\n Z0 = np.zeros(nphi)\n R0p = np.zeros(nphi)\n Z0p = np.zeros(nphi)\n R0pp = np.zeros(nphi)\n Z0pp = np.zeros(nphi)\n R0ppp = np.zeros(nphi)\n Z0ppp = np.zeros(nphi)\n for jn in range(0, 
self.nfourier):\n n = jn * nfp\n sinangle = np.sin(n * phi)\n cosangle = np.cos(n * phi)\n R0 += self.rc[jn] * cosangle + self.rs[jn] * sinangle\n Z0 += self.zc[jn] * cosangle + self.zs[jn] * sinangle\n R0p += self.rc[jn] * (-n * sinangle) + self.rs[jn] * (n * cosangle)\n Z0p += self.zc[jn] * (-n * sinangle) + self.zs[jn] * (n * cosangle)\n R0pp += self.rc[jn] * (-n * n * cosangle) + self.rs[jn] * (-n * n * sinangle)\n Z0pp += self.zc[jn] * (-n * n * cosangle) + self.zs[jn] * (-n * n * sinangle)\n R0ppp += self.rc[jn] * (n * n * n * sinangle) + self.rs[jn] * (-n * n * n * cosangle)\n Z0ppp += self.zc[jn] * (n * n * n * sinangle) + self.zs[jn] * (-n * n * n * cosangle)\n\n d_l_d_phi = np.sqrt(R0 * R0 + R0p * R0p + Z0p * Z0p)\n d2_l_d_phi2 = (R0 * R0p + R0p * R0pp + Z0p * Z0pp) / d_l_d_phi\n B0_over_abs_G0 = nphi / np.sum(d_l_d_phi)\n abs_G0_over_B0 = 1 / B0_over_abs_G0\n self.d_l_d_varphi = abs_G0_over_B0\n G0 = self.sG * abs_G0_over_B0 * self.B0\n\n # For these next arrays, the first dimension is phi, and the 2nd dimension is (R, phi, Z).\n d_r_d_phi_cylindrical = np.array([R0p, R0, Z0p]).transpose()\n d2_r_d_phi2_cylindrical = np.array([R0pp - R0, 2 * R0p, Z0pp]).transpose()\n d3_r_d_phi3_cylindrical = np.array([R0ppp - 3 * R0p, 3 * R0pp - R0, Z0ppp]).transpose()\n\n tangent_cylindrical = np.zeros((nphi, 3))\n d_tangent_d_l_cylindrical = np.zeros((nphi, 3))\n for j in range(3):\n tangent_cylindrical[:,j] = d_r_d_phi_cylindrical[:,j] / d_l_d_phi\n d_tangent_d_l_cylindrical[:,j] = (-d_r_d_phi_cylindrical[:,j] * d2_l_d_phi2 / d_l_d_phi \\\n + d2_r_d_phi2_cylindrical[:,j]) / (d_l_d_phi * d_l_d_phi)\n\n curvature = np.sqrt(d_tangent_d_l_cylindrical[:,0] * d_tangent_d_l_cylindrical[:,0] + \\\n d_tangent_d_l_cylindrical[:,1] * d_tangent_d_l_cylindrical[:,1] + \\\n d_tangent_d_l_cylindrical[:,2] * d_tangent_d_l_cylindrical[:,2])\n\n axis_length = np.sum(d_l_d_phi) * d_phi * nfp\n rms_curvature = np.sqrt((np.sum(curvature * curvature * d_l_d_phi) * d_phi * nfp) / axis_length)\n mean_of_R = np.sum(R0 * d_l_d_phi) * d_phi * nfp / axis_length\n mean_of_Z = np.sum(Z0 * d_l_d_phi) * d_phi * nfp / axis_length\n standard_deviation_of_R = np.sqrt(np.sum((R0 - mean_of_R) ** 2 * d_l_d_phi) * d_phi * nfp / axis_length)\n standard_deviation_of_Z = np.sqrt(np.sum((Z0 - mean_of_Z) ** 2 * d_l_d_phi) * d_phi * nfp / axis_length)\n\n normal_cylindrical = np.zeros((nphi, 3))\n for j in range(3):\n normal_cylindrical[:,j] = d_tangent_d_l_cylindrical[:,j] / curvature\n self.normal_cylindrical = normal_cylindrical\n self._determine_helicity()\n\n # b = t x n\n binormal_cylindrical = np.zeros((nphi, 3))\n binormal_cylindrical[:,0] = tangent_cylindrical[:,1] * normal_cylindrical[:,2] - tangent_cylindrical[:,2] * normal_cylindrical[:,1]\n binormal_cylindrical[:,1] = tangent_cylindrical[:,2] * normal_cylindrical[:,0] - tangent_cylindrical[:,0] * normal_cylindrical[:,2]\n binormal_cylindrical[:,2] = tangent_cylindrical[:,0] * normal_cylindrical[:,1] - tangent_cylindrical[:,1] * normal_cylindrical[:,0]\n\n # We use the same sign convention for torsion as the\n # Landreman-Sengupta-Plunk paper, wikipedia, and\n # mathworld.wolfram.com/Torsion.html. 
This sign convention is\n # opposite to Garren & Boozer's sign convention!\n torsion_numerator = (d_r_d_phi_cylindrical[:,0] * (d2_r_d_phi2_cylindrical[:,1] * d3_r_d_phi3_cylindrical[:,2] - d2_r_d_phi2_cylindrical[:,2] * d3_r_d_phi3_cylindrical[:,1]) \\\n + d_r_d_phi_cylindrical[:,1] * (d2_r_d_phi2_cylindrical[:,2] * d3_r_d_phi3_cylindrical[:,0] - d2_r_d_phi2_cylindrical[:,0] * d3_r_d_phi3_cylindrical[:,2]) \n + d_r_d_phi_cylindrical[:,2] * (d2_r_d_phi2_cylindrical[:,0] * d3_r_d_phi3_cylindrical[:,1] - d2_r_d_phi2_cylindrical[:,1] * d3_r_d_phi3_cylindrical[:,0]))\n\n torsion_denominator = (d_r_d_phi_cylindrical[:,1] * d2_r_d_phi2_cylindrical[:,2] - d_r_d_phi_cylindrical[:,2] * d2_r_d_phi2_cylindrical[:,1]) ** 2 \\\n + (d_r_d_phi_cylindrical[:,2] * d2_r_d_phi2_cylindrical[:,0] - d_r_d_phi_cylindrical[:,0] * d2_r_d_phi2_cylindrical[:,2]) ** 2 \\\n + (d_r_d_phi_cylindrical[:,0] * d2_r_d_phi2_cylindrical[:,1] - d_r_d_phi_cylindrical[:,1] * d2_r_d_phi2_cylindrical[:,0]) ** 2\n\n torsion = torsion_numerator / torsion_denominator\n\n self.etabar_squared_over_curvature_squared = self.etabar * self.etabar / (curvature * curvature)\n\n self.d_d_phi = spectral_diff_matrix(self.nphi, xmax=2 * np.pi / self.nfp)\n self.d_varphi_d_phi = B0_over_abs_G0 * d_l_d_phi\n self.d_d_varphi = np.zeros((nphi, nphi))\n for j in range(nphi):\n self.d_d_varphi[j,:] = self.d_d_phi[j,:] / self.d_varphi_d_phi[j]\n\n # Compute the Boozer toroidal angle:\n self.varphi = np.zeros(nphi)\n for j in range(1, nphi):\n # To get toroidal angle on the full mesh, we need d_l_d_phi on the half mesh.\n self.varphi[j] = self.varphi[j-1] + (d_l_d_phi[j-1] + d_l_d_phi[j])\n self.varphi = self.varphi * (0.5 * d_phi * 2 * np.pi / axis_length)\n\n # Add all results to self:\n self.phi = phi\n self.d_phi = d_phi\n self.R0 = R0\n self.Z0 = Z0\n self.R0p = R0p\n self.Z0p = Z0p\n self.R0pp = R0pp\n self.Z0pp = Z0pp\n self.R0ppp = R0ppp\n self.Z0ppp = Z0ppp\n self.G0 = G0\n self.d_l_d_phi = d_l_d_phi\n self.axis_length = axis_length\n self.curvature = curvature\n self.torsion = torsion\n self.X1s = np.zeros(nphi)\n self.X1c = self.etabar / curvature\n self.min_R0 = fourier_minimum(self.R0)\n self.tangent_cylindrical = tangent_cylindrical\n self.normal_cylindrical = normal_cylindrical \n self.binormal_cylindrical = binormal_cylindrical\n self.Bbar = self.spsi * self.B0\n self.abs_G0_over_B0 = abs_G0_over_B0\n\n # The output is not stellarator-symmetric if (1) R0s is nonzero,\n # (2) Z0c is nonzero, (3) sigma_initial is nonzero, or (B2s is\n # nonzero and order != 'r1')\n self.lasym = np.max(np.abs(self.rs)) > 0 or np.max(np.abs(self.zc)) > 0 \\\n or self.sigma0 != 0 or (self.order != 'r1' and self.B2s != 0)\n\n # Functions that converts a toroidal angle phi0 on the axis to the axis radial and vertical coordinates\n self.R0_func = self.convert_to_spline(sum([self.rc[i]*np.cos(i*self.nfp*self.phi) +\\\n self.rs[i]*np.sin(i*self.nfp*self.phi) \\\n for i in range(len(self.rc))]))\n self.Z0_func = self.convert_to_spline(sum([self.zc[i]*np.cos(i*self.nfp*self.phi) +\\\n self.zs[i]*np.sin(i*self.nfp*self.phi) \\\n for i in range(len(self.zs))]))\n\n # Spline interpolants for the cylindrical components of the Frenet-Serret frame:\n self.normal_R_spline = self.convert_to_spline(self.normal_cylindrical[:,0])\n self.normal_phi_spline = self.convert_to_spline(self.normal_cylindrical[:,1])\n self.normal_z_spline = self.convert_to_spline(self.normal_cylindrical[:,2])\n self.binormal_R_spline = self.convert_to_spline(self.binormal_cylindrical[:,0])\n 
self.binormal_phi_spline = self.convert_to_spline(self.binormal_cylindrical[:,1])\n self.binormal_z_spline = self.convert_to_spline(self.binormal_cylindrical[:,2])\n self.tangent_R_spline = self.convert_to_spline(self.tangent_cylindrical[:,0])\n self.tangent_phi_spline = self.convert_to_spline(self.tangent_cylindrical[:,1])\n self.tangent_z_spline = self.convert_to_spline(self.tangent_cylindrical[:,2])\n\n # Spline interpolant for nu = varphi - phi, used for plotting\n self.nu_spline = self.convert_to_spline(self.varphi - self.phi)", "def inner(Ax, Ay, Bx, By):\n return (Ax*Bx + Ay*By) / (Ax**2+Ay**2)**0.5 / (Bx**2+By**2)**0.5" ]
[ "0.687762", "0.6436624", "0.58278096", "0.578275", "0.57705504", "0.5755263", "0.5649262", "0.5623161", "0.5620293", "0.5616361", "0.55939484", "0.55838054", "0.5497484", "0.54887134", "0.54881567", "0.54828835", "0.54536176", "0.5438182", "0.5438182", "0.54221433", "0.5411174", "0.54104936", "0.53689116", "0.53688836", "0.53652114", "0.53632826", "0.5338338", "0.53027475", "0.5285858", "0.52843916" ]
0.7024464
0
Computes the geometric inner orientation parameters
def ComputeGeometricParameters(self):
        # extracting inner orientation params
        a0 = self.innerOrientationParameters[0]
        b0 = self.innerOrientationParameters[1]
        a1 = self.innerOrientationParameters[2]
        a2 = self.innerOrientationParameters[3]
        b1 = self.innerOrientationParameters[4]
        b2 = self.innerOrientationParameters[5]

        # computing algebric params
        tx = a0;
        ty = b0
        theta = np.arctan(b1 / b2)
        gamma = np.arctan((a1 * np.sin(theta) + a2 * np.cos(theta)) / (b1 * np.sin(theta) + b2 * np.cos(theta)))
        sx = a1 * np.cos(theta) - a2 * np.sin(theta)
        sy = (a1 * np.sin(theta) + a2 * np.cos(theta)) / np.sin(gamma)

        return {"translationX": tx, "translationY": ty, "rotationAngle": np.rad2deg(theta), "scaleFactorX": sx,
                "scaleFactorY": sy, "shearAngle": np.rad2deg(gamma)}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def magnitude_orientation(gx, gy):\n \n magnitude = np.sqrt(gx**2 + gy**2)\n orientation = (np.arctan2(gy, gx) * 180 / np.pi) % 180\n \n return magnitude, orientation", "def ComputeInverseInnerOrientation(self):\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n mat = np.array([[a1[0], a2[0]], [b1[0], b2[0]]])\n mat = la.inv(mat)\n\n return np.array([a0[0], b0[0], mat[0, 0], mat[0, 1], mat[1, 0], mat[1, 1]]).T", "def ComputeInnerOrientation(self, imagePoints):\n # implementing observation vectors\n imagePoints = imagePoints.reshape(np.size(imagePoints), 1)\n\n fMarks = self.camera.fiducialMarks.reshape(np.size(self.camera.fiducialMarks), 1)\n\n n = int(len(imagePoints)) # number of observations\n u = 6 # 6 orientation parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(imagePoints)):\n if i % 2 == 0:\n A[i, 0] = 1;\n A[i, 1] = 0;\n A[i, 2] = fMarks[j];\n A[i, 3] = fMarks[j + 1];\n A[i, 4] = 0\n A[i, 5] = 0\n else:\n A[i, 0] = 0;\n A[i, 1] = 1;\n A[i, 2] = 0;\n A[i, 3] = 0;\n A[i, 4] = fMarks[j];\n A[i, 5] = fMarks[j + 1]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), imagePoints))\n v = np.dot(A, X) - imagePoints\n\n adjustment_results = {\"params\": X, \"residuals\": v, \"N\": np.dot(np.transpose(A), A)}\n\n self.__innerOrientationParameters = X # updating the inner orientation params\n\n return adjustment_results", "def ellipse_orientation(S):\n return 1/2 * np.arctan2(S[..., 2], S[..., 1])", "def orientation(xp, yp, xq, yq, xr, yr):\n cross = (xq-xp)*(yr-yp) - (xr-xp)*(yq-yp)\n dot = (xq-xp)*(xr-xp) + (yr-yp)*(yq-yp)\n if cross < 0:\n return -1\n elif cross > 0:\n return 1\n elif dot > 0:\n return 0\n else:\n return math.pi", "def getAxisAndAngleDegres(self):\n theta = numpy.arccos(self.real)\n angle = 360 * theta / numpy.pi\n xyz = self.pure / numpy.sin(theta)\n return xyz, angle", "def get_orientation_vector(self, xyz):\n if self.g0:\n v = xyz[self.g0] - xyz[self.Ga()]\n else:\n v = self.x\n assert self.offt == 'GGG', self.offt\n return v", "def calc_main_axis(self):\n #Clarify why the above step has been done\n c0, c1, c2 = self.calc_geom_center ()\n M = numpy.zeros ((3, 3), dtype=float)\n M = [[0] * 3, [0] * 3, [0] * 3]\n for x in self.atomcoords:\n xi = x[0] - c0\n yi = x[1] - c1\n zi = x[2] - c2\n M[0][0] = M[0][0] + xi * xi\n M[0][1] = M[0][1] + xi * yi\n M[0][2] = M[0][2] + xi * zi\n M[1][1] = M[1][1] + yi * yi\n M[1][2] = M[1][2] + yi * zi\n M[2][2] = M[2][2] + zi * zi\n M[1][0] = M[0][1]\n M[2][0] = M[0][2]\n M[2][1] = M[1][2]\n M = numpy.array (M)\n d = sum (numpy.diag (M))\n M = -M\n M[0, 0] = M[0, 0] + d\n M[1, 1] = M[1, 1] + d\n M[2, 2] = M[2, 2] + d\n\n eigenVals, eigenVecs = numpy.linalg.eig (M)\n eigenVecs = eigenVecs.transpose ()\n return eigenVecs", "def derive(params):\n x, y, dx, dy = params\n r = (x ** 2 + y ** 2) ** 0.5\n return np.array([dx, dy, -G * M * x / (r ** 3), -G * M * y / (r ** 3)])", "def orientation(p, q, r):\n val = (q.y - p.y) * (r.x - q.x) - (q.x - p.x) * (r.y - q.y)\n if val == 0:\n return 0\n elif val > 0:\n return 1\n else:\n return 2", "def compute_orientation(x,y,lx,ly,nfil):\n # number of molecules\n natoms = len(x)\n nmol = natoms/nfil\n # allocate aray for results\n phi = np.zeros((natoms), dtype = np.float64)\n tx = np.zeros((natoms), dtype = np.float64)\n ty = 
np.zeros((natoms), dtype = np.float64)\n # loop over all polymers\n k = 0\n for i in range(nmol):\n for j in range(nfil):\n if j == 0:\n x1 = x[k]\n y1 = y[k]\n x2 = x[k+1]\n y2 = y[k+1]\n elif j == nfil-1:\n x1 = x[k-1]\n y1 = y[k-1]\n x2 = x[k]\n y2 = y[k]\n else:\n x1 = x[k-1]\n y1 = y[k-1]\n x2 = x[k+1]\n y2 = y[k+1]\n # compute nearest neighbor\n dx = neigh_min(x2-x1,lx)\n dy = neigh_min(y2-y1,ly)\n # compute angle using atan2\n pi = math.atan2(dy,dx)\n phi[k] = pi\n tx[k] = dx / np.sqrt(dx**2 + dy**2)\n ty[k] = dy / np.sqrt(dx**2 + dy**2)\n # increment k\n k = k + 1\n return phi, tx, ty", "def get_orienationDict(self,orienation='zyx'):\n try:\n _str_func = 'rootShape_update'\n log.debug(cgmGEN.logString_start(_str_func))\n \n _d = {}\n _mOrientation = VALID.simpleOrientation('zyx')#cgmValid.simpleOrientation(str(modules.returnSettingsData('jointOrientation')) or 'zyx')\n _d['str'] = _mOrientation.p_string\n _d['mOrientation'] = _mOrientation\n _d['vectorAim'] = _mOrientation.p_aim.p_vector\n _d['vectorUp'] = _mOrientation.p_up.p_vector\n _d['vectorOut'] = _mOrientation.p_out.p_vector\n \n _d['vectorAimNeg'] = _mOrientation.p_aimNegative.p_vector\n _d['vectorUpNeg'] = _mOrientation.p_upNegative.p_vector\n _d['vectorOutNeg'] = _mOrientation.p_outNegative.p_vector\n \n \n _d['stringAim'] = _mOrientation.p_aim.p_string\n _d['stringUp'] = _mOrientation.p_up.p_string\n _d['stringOut'] = _mOrientation.p_out.p_string\n \n _d['stringAimNeg'] = _mOrientation.p_aimNegative.p_string\n _d['stringUpNeg'] = _mOrientation.p_upNegative.p_string\n _d['stringOutNeg'] = _mOrientation.p_outNegative.p_string \n return _d\n except Exception,err:\n cgmGEN.cgmExceptCB(Exception,err)", "def IK_geometric(dh_params, pose):\n pass", "def azizen(self):\n # x0,y0 array pixel coordinates relative to cx,cy\n# ndy0,ndx0=img.shape\n ndy0=self.ndy0\n ndx0=self.ndx0\n x0,y0=np.meshgrid(np.linspace(0,ndx0-1,ndx0)-self.cx,np.linspace(0,ndy0-1,ndy0)-self.cy)\n r0=np.sqrt(x0**2+y0**2)/self.pr0 # fractional radial distance from 0,0\n# self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]\n # why not model the zenith angle dependence with polynomial directly\n # rather than linear interpolation between roots.\n roots=np.zeros(51)\n rr=np.arange(51)/100.0\n for i,ref in enumerate(rr):\n roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])\n theta0 = np.interp(r0/2,rr,roots)\n \n phi0 = np.arctan2(x0,y0) - self.rot ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise, NOTE: pysolar use sub-standard definition\n phi0 = phi0%(2*np.pi)\n\n #####correction for the tilt of the camera\n k=np.array((np.sin(self.azm),np.cos(self.azm),0))\n a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]); \n a = np.transpose(a,[1,2,0])\n b=np.cos(self.beta)*a + np.sin(self.beta)*np.cross(k,a,axisb=2) \\\n + np.reshape(np.outer(np.dot(a,k),k),(self.ndy0,self.ndx0,3))*(1-np.cos(self.beta))\n theta0=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])\n phi0=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)\n# max_theta *= deg2rad \n# valid0 = (theta0<max_theta) & (theta0>0); \n# theta0[valid0]=np.nan;\n self.theta0,self.phi0=theta0,phi0", "def get_orientation(self):\n # Only work with rotation around x by now\n n0 = DEFAULT_N0\n n1 = DEFAULT_N1\n if self.rotation[2] != 0.0:\n n0 = self.rotate_z(n0)\n n1 = self.rotate_z(n1)\n return n0, n1, DEFAULT_N2", "def orientation(cnt):\n\t(x,y), (MA, ma), angle = cv2.fitEllipse(cnt)\n\treturn angle", "def 
polar_decomposition(self):\n return self.polar_unit_vector, self.polar_angle", "def greenhouse_orientation():\n \n # NEED TO CHECK THIS WITH COMPASS (OR IPHONE)\n orientation_angle = 90 # angle between east-west line and the length of the greenhouse (0-90 degree)\n orientation_angle = float(orientation_angle)", "def _get_orientations(self):\n for atom in self.invarioms:\n atom.get_orientation()", "def angle(z):", "def gyroi(E, B, mu, Zi,pitch):\n V = ev2vi(E, mu);\n Vperp = V*np.sqrt(1-pitch);\n return mu * mp * Vperp / Zi / eV2J / B;", "def invgeochart(w):\n # u = torch.asin(w[...,2])\n u = torch.acos(w[...,2])\n # v = torch.acos(w[...,0]/torch.cos(u))\n v = torch.atan(w[...,1]/w[...,0])\n return torch.stack((u,v+np.pi))", "def sivina(self):\n return (self.r + self.g + self.b) / 3", "def get_angle_and_body_vector(moments):\n body_cov = np.array( [ [moments['mu20'], moments['mu11']], [moments['mu11'], moments['mu02'] ]])\n eig_vals, eig_vecs = np.linalg.eigh(body_cov)\n max_eig_ind = np.argmax(eig_vals**2)\n max_eig_vec = eig_vecs[:,max_eig_ind]\n angle = np.arctan2(max_eig_vec[1], max_eig_vec[0])\n return angle, max_eig_vec", "def get_angle_and_body_vector(moments):\n body_cov = np.array( [ [moments['mu20'], moments['mu11']], [moments['mu11'], moments['mu02'] ]])\n eig_vals, eig_vecs = np.linalg.eigh(body_cov)\n max_eig_ind = np.argmax(eig_vals**2)\n max_eig_vec = eig_vecs[:,max_eig_ind]\n angle = np.arctan2(max_eig_vec[1], max_eig_vec[0])\n return angle, max_eig_vec", "def orientation(p0, p1, p2):\n\n angle = (p1[1] - p0[1])*(p2[0] - p1[0]) - (p2[1] - p1[1])*(p1[0] - p0[0])\n if angle == 0.0:\n return 0\n elif angle < 0.0:\n return -1\n elif angle > 0.0:\n return 1", "def _save_parameters(self):\n\n # eigenvectors are the coefficients of an ellipse in general form\n # a*x^2 + 2*b*x*y + c*y^2 + 2*d*x + 2*f*y + g = 0 [eqn. 15) from (**) or (***)\n a = self.coef[0, 0]\n b = self.coef[1, 0]/2.\n c = self.coef[2, 0]\n d = self.coef[3, 0]/2.\n f = self.coef[4, 0]/2.\n g = self.coef[5, 0]\n\n # finding center of ellipse [eqn.19 and 20] from (**)\n x0 = (c*d-b*f)/(b**2.-a*c)\n y0 = (a*f-b*d)/(b**2.-a*c)\n\n # Find the semi-axes lengths [eqn. 21 and 22] from (**)\n numerator = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)\n denominator1 = (b*b-a*c) * \\\n ((c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n denominator2 = (b*b-a*c) * \\\n ((a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n width = np.sqrt(numerator/denominator1)\n height = np.sqrt(numerator/denominator2)\n\n # angle of counterclockwise rotation of major-axis of ellipse to x-axis [eqn. 23] from (**)\n # or [eqn. 
26] from (***).\n phi = .5*np.arctan((2.*b)/(a-c))\n\n self._center = [x0, y0]\n self._width = width\n self._height = height\n self._phi = phi", "def orient_2d(p,q,r):\n return (q[0]-p[0])*(r[1]-p[1]) - (r[0]-p[0])*(q[1]-p[1])", "def coord_space(\n a0: numpy.ndarray, a1: numpy.ndarray, a2: numpy.ndarray, rev: bool = False\n) -> Tuple[numpy.ndarray, Optional[numpy.ndarray]]:\n # dbg = False\n # if dbg:\n # print(a0.transpose())\n # print(a1.transpose())\n # print(a2.transpose())\n\n # a0 = acs[0]\n # a1 = acs[1]\n # a2 = acs[2]\n\n global gtm\n global gmry\n global gmrz, gmrz2\n\n tm = gtm\n mry = gmry\n mrz = gmrz\n mrz2 = gmrz2\n\n # tx acs[1] to origin\n # tm = homog_trans_mtx(-a1[0][0], -a1[1][0], -a1[2][0])\n set_homog_trans_mtx(-a1[0], -a1[1], -a1[2], tm)\n\n # directly translate a2 using a1\n p = a2 - a1\n sc = get_spherical_coordinates(p)\n\n # if dbg:\n # print(\"p\", p.transpose())\n # print(\"sc\", sc)\n\n # mrz = homog_rot_mtx(-sc[1], \"z\") # rotate translated a2 -azimuth about Z\n set_Z_homog_rot_mtx(-sc[1], mrz)\n # mry = homog_rot_mtx(-sc[2], \"y\") # rotate translated a2 -polar_angle about Y\n set_Y_homog_rot_mtx(-sc[2], mry)\n\n # mt completes a1-a2 on Z-axis, still need to align a0 with XZ plane\n # mt = mry @ mrz @ tm # python 3.5 and later\n mt = gmry.dot(gmrz.dot(gtm))\n\n # if dbg:\n # print(\"tm:\\n\", tm)\n # print(\"mrz:\\n\", mrz)\n # print(\"mry:\\n\", mry)\n # # print(\"mt \", mt)\n\n p = mt.dot(a0)\n\n # if dbg:\n # print(\"mt:\\n\", mt, \"\\na0:\\n\", a0, \"\\np:\\n\", p)\n\n # need azimuth of translated a0\n # sc2 = get_spherical_coordinates(p)\n # print(sc2)\n azimuth2 = _get_azimuth(p[0], p[1])\n\n # rotate a0 -azimuth2 about Z to align with X\n # mrz2 = homog_rot_mtx(-azimuth2, \"z\")\n set_Z_homog_rot_mtx(-azimuth2, mrz2)\n\n # mt = mrz2 @ mt\n mt = gmrz2.dot(mt)\n\n # if dbg:\n # print(\"mt:\", mt, \"\\na0:\", a0, \"\\np:\", p)\n # # print(p, \"\\n\", azimuth2, \"\\n\", mrz2, \"\\n\", mt)\n\n # if dbg:\n # print(\"mt:\\n\", mt)\n # print(\"<<<<<<==============================\")\n\n if not rev:\n return mt, None\n\n # rev=True, so generate the reverse transformation\n\n # rotate a0 theta about Z, reversing alignment with X\n # mrz2 = homog_rot_mtx(azimuth2, \"z\")\n set_Z_homog_rot_mtx(azimuth2, mrz2)\n # rotate a2 phi about Y\n # mry = homog_rot_mtx(sc[2], \"y\")\n set_Y_homog_rot_mtx(sc[2], mry)\n # rotate a2 theta about Z\n # mrz = homog_rot_mtx(sc[1], \"z\")\n set_Z_homog_rot_mtx(sc[1], mrz)\n # translation matrix origin to a1\n # tm = homog_trans_mtx(a1[0][0], a1[1][0], a1[2][0])\n set_homog_trans_mtx(a1[0], a1[1], a1[2], tm)\n\n # mr = tm @ mrz @ mry @ mrz2\n mr = gtm.dot(gmrz.dot(gmry.dot(gmrz2)))\n # mr = numpy.dot(tm, numpy.dot(mrz, numpy.dot(mry, mrz2)))\n\n return mt, mr", "def params_2D_3D(alpha, sx, sy, mirror):\n\tphi = 0\n\tpsi = 0\n\ttheta = 0\n\talphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)\n\tif mirror > 0:\n\t\tphi = (540.0 + phi)%360.0\n\t\ttheta = 180.0 - theta\n\t\tpsi = (540.0 - psi + alphan)%360.0\n\telse:\n\t\tpsi = (psi + alphan)%360.0\n\treturn phi, theta, psi, s2x, s2y" ]
[ "0.6358747", "0.6289955", "0.626587", "0.6029137", "0.5908361", "0.5868645", "0.5784191", "0.57483953", "0.5706465", "0.5689375", "0.5624956", "0.5610132", "0.5609869", "0.560806", "0.5597778", "0.5587196", "0.5573857", "0.5553297", "0.55513024", "0.55361223", "0.5525677", "0.5524676", "0.5512775", "0.54997075", "0.54997075", "0.5473307", "0.5463591", "0.54411703", "0.5392926", "0.53810036" ]
0.740137
0
Computes the parameters of the inverse inner orientation transformation
def ComputeInverseInnerOrientation(self):
        a0 = self.innerOrientationParameters[0]
        b0 = self.innerOrientationParameters[1]
        a1 = self.innerOrientationParameters[2]
        a2 = self.innerOrientationParameters[3]
        b1 = self.innerOrientationParameters[4]
        b2 = self.innerOrientationParameters[5]

        mat = np.array([[a1[0], a2[0]], [b1[0], b2[0]]])
        mat = la.inv(mat)

        return np.array([a0[0], b0[0], mat[0, 0], mat[0, 1], mat[1, 0], mat[1, 1]]).T
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inverse(self):\n def inv(v):\n v[0], v[1] = v[1] , v[0]\n for v in [self.point1 , self.pointN , self.unitv, self.normalv]:\n inv(v)\n\n self.points = numpy.roll(self.points,1,axis=1)\n self.a, self.b = self.b, self.a\n self.angle = numpy.arccos( self.unitv[0] )*numpy.sign(self.unitv[1] )\n return", "def ComputeInnerOrientation(self, imagePoints):\n # implementing observation vectors\n imagePoints = imagePoints.reshape(np.size(imagePoints), 1)\n\n fMarks = self.camera.fiducialMarks.reshape(np.size(self.camera.fiducialMarks), 1)\n\n n = int(len(imagePoints)) # number of observations\n u = 6 # 6 orientation parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(imagePoints)):\n if i % 2 == 0:\n A[i, 0] = 1;\n A[i, 1] = 0;\n A[i, 2] = fMarks[j];\n A[i, 3] = fMarks[j + 1];\n A[i, 4] = 0\n A[i, 5] = 0\n else:\n A[i, 0] = 0;\n A[i, 1] = 1;\n A[i, 2] = 0;\n A[i, 3] = 0;\n A[i, 4] = fMarks[j];\n A[i, 5] = fMarks[j + 1]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), imagePoints))\n v = np.dot(A, X) - imagePoints\n\n adjustment_results = {\"params\": X, \"residuals\": v, \"N\": np.dot(np.transpose(A), A)}\n\n self.__innerOrientationParameters = X # updating the inner orientation params\n\n return adjustment_results", "def getInverseMatrix(self) -> CMatrix4:\n ...", "def _r_inv(self):\n # [output_dim, output_dim]\n return tf.linalg.cholesky_solve(\n self._chol_obs_covariance,\n tf.eye(self.emission.output_dim, dtype=self._chol_obs_covariance.dtype),\n )", "def GetInverse(self, *args):\n return _itkTranslationTransformPython.itkTranslationTransformD2_GetInverse(self, *args)", "def inverse(self):\r\n \r\n Mi=mat4()\r\n d=self.determinant()\r\n for i in range(4):\r\n for j in range(4):\r\n sign=1-((i+j)%2)*2\r\n m3=self._submat(i,j)\r\n Mi[j,i]=sign*m3.determinant()/d\r\n return Mi", "def inverse_transform2(alpha, tx = 0.0, ty = 0.0, mirror = 0):\n\n\tt = Transform({\"type\":\"2D\",\"alpha\":alpha,\"tx\":tx,\"ty\":ty,\"mirror\":mirror,\"scale\":1.0})\n\tt = t.inverse()\n\tt = t.get_params(\"2D\")\n\treturn t[ \"alpha\" ], t[ \"tx\" ], t[ \"ty\" ], t[ \"mirror\" ]", "def inv(self):\n\n self.x, self.y = self.y, self.x\n self._x_, self._y_ = self._y_, self._x_\n self.xfac, self.yfac = 1 / self.yfac, 1 / self.xfac\n self._xfac_, self._yfac_ = 1 / self._yfac_, 1 / self._xfac_\n self._u = 1 / self._u.conj()", "def _get_inv(self):\n m,d = self.B.shape\n Im = np.eye(m)\n Id = np.eye(d)\n BBt = [email protected]\n I_BBt_inv = np.linalg.pinv(Im + BBt)\n \n return (1/self.alpha)*(Id - self.B.T@( [email protected]/self.alpha))", "def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n # TODO - your code here\n inverse = []\n if self.h == 1:\n temp = []\n temp.append(1/self.g[0][0])\n inverse.append(temp)\n else:\n identity_matrix = identity(self.h)\n det_term = 1/self.determinant()\n trace_term = self.trace()\n # implement intermediate scaling step locally\n # trace_x_I = trace_term * identity_matrix\n trace_x_I = []\n for i in range(len(self.g)):\n temp_row = []\n for j in range(len(self.g[i])):\n temp_row.append(trace_term * identity_matrix[i][j])\n trace_x_I.append(temp_row)\n # implement sub-traction locally\n # sub_term = trace_x_I - self.g\n sub_term = []\n for i in range(len(trace_x_I)):\n temp_row = []\n for j in range(len(trace_x_I[i])):\n temp_row.append(trace_x_I[i][j] 
- self.g[i][j])\n sub_term.append(temp_row)\n # implement final scaling step locally\n # inverse = det_term * sub_term\n inverse = []\n for i in range(len(sub_term)):\n temp_row = []\n for j in range(len(sub_term[i])):\n temp_row.append(det_term * sub_term[i][j])\n inverse.append(temp_row)\n return Matrix(inverse)\n # TODO - your code here", "def get_inverse_affine_param(affine_param,dim=3):\n\n affine_param = affine_param.view(affine_param.shape[0], dim+1, dim)\n inverse_param = torch.zeros_like(affine_param.data).to(affine_param.device)\n for n in range(affine_param.shape[0]):\n tm_inv = torch.inverse(affine_param[n, :dim,:])\n inverse_param[n, :dim, :] = tm_inv\n inverse_param[n, dim, :] = - torch.matmul(tm_inv, affine_param[n, dim, :])\n inverse_param = inverse_param.contiguous().view(affine_param.shape[0], -1)\n return inverse_param", "def inverse(self):\n rotation_matrix = self.pose_mat[:3, :3]\n translation_vector = self.pose_mat[:3, 3]\n\n rot = np.transpose(rotation_matrix)\n trans = - np.matmul(np.transpose(rotation_matrix), translation_vector)\n return Transformation(rot, trans)", "def _inverse(self, y):\n d = self._compute_shared(y=y)\n rely = y - d.y_k # tf.where(d.out_of_bounds, tf.zeros_like(y), y - d.y_k)\n term2 = rely * (d.d_kp1 + d.d_k - 2 * d.s_k)\n # These terms are the a, b, c terms of the quadratic formula.\n a = d.h_k * (d.s_k - d.d_k) + term2\n b = d.h_k * d.d_k - term2\n c = -d.s_k * rely\n # The expression used here has better numerical behavior for small 4*a*c.\n relx = tf.where(\n tf.equal(rely, 0), tf.zeros_like(a),\n (2 * c) / (-b - tf.sqrt(b**2 - 4 * a * c)))\n return relx * d.w_k + d.x_k #tf.where(d.out_of_bounds, y, relx * d.w_k + d.x_k)", "def inverse(self, x, y):", "def posdef_inv_eig(tensor, identity, damping):\n eigenvalues, eigenvectors = tf.self_adjoint_eig(tensor + damping * identity)\n return tf.matmul(eigenvectors / eigenvalues, eigenvectors, transpose_b=True)", "def inverse_transform(self, y: Array2D) -> Array2D:", "def GetInverse(self, *args):\n return _itkTranslationTransformPython.itkTranslationTransformD3_GetInverse(self, *args)", "def inverse(self, y):\n device = y.device\n return t.einsum('ij,k,kj->ik', y, 1. 
/ t.sqrt(self.eig).to(device), self.rot.to(device))", "def _inverse_affine_matrix(self) -> np.ndarray:\n raise NotImplementedError", "def inverse(self, ys):\n with torch.no_grad():\n xs = torch.matmul(ys, torch.diag(torch.reciprocal(torch.exp(self.scaling_diag))))\n xs = self.layer4.inverse(xs)\n xs = self.layer3.inverse(xs)\n xs = self.layer2.inverse(xs)\n xs = self.layer1.inverse(xs)\n return xs", "def intrinsic_matrix_inv(self) -> np.ndarray:\n\n # determinant of top left of intrinsic matrix\n tldet = self.kx * self.ky\n\n return np.array([[1 / self.kx, -self.kxy / tldet, (self.py * self.kxy - self.ky * self.px) / tldet],\n [0, 1 / self.ky, -self.py / self.ky]])", "def invgeochart(w):\n # u = torch.asin(w[...,2])\n u = torch.acos(w[...,2])\n # v = torch.acos(w[...,0]/torch.cos(u))\n v = torch.atan(w[...,1]/w[...,0])\n return torch.stack((u,v+np.pi))", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def complex_inverse(c1,cr):", "def affine_transform_inverse(np_transform):\n rotation = np_transform[:3, :3]\n translation = np_transform[:3, 3]\n rotation_inv = numpy.linalg.inv(rotation)\n translation_inv = -1 * numpy.dot(rotation_inv, translation)\n result = numpy.identity(4)\n result[:3, :3] = rotation_inv\n result[:3, 3] = translation_inv.flatten()\n return result", "def posdef_inv_matrix_inverse(tensor, identity, damping):\n return tf.matrix_inverse(tensor + damping * identity)" ]
[ "0.63808596", "0.6343662", "0.63060564", "0.6252362", "0.61781716", "0.61693305", "0.61612785", "0.61262125", "0.61208826", "0.6098615", "0.609339", "0.60388464", "0.59626293", "0.59568155", "0.5946826", "0.5945765", "0.59148675", "0.5901364", "0.5894338", "0.5879606", "0.5866621", "0.5846758", "0.5840087", "0.5840087", "0.5840087", "0.5840087", "0.5840087", "0.5833871", "0.58016366", "0.5789425" ]
0.7558201
0
Transforms camera points to image points
def CameraToImage(self, cameraPoints): # setting up the required matrices a0 = self.innerOrientationParameters[0] b0 = self.innerOrientationParameters[1] a1 = self.innerOrientationParameters[2] a2 = self.innerOrientationParameters[3] b1 = self.innerOrientationParameters[4] b2 = self.innerOrientationParameters[5] if np.isscalar(a0): R = np.array([[a1, a2], [b1, b2]]) T = np.array([[a0], [b0]]) else: R = np.array([[a1[0], a2[0]], [b1[0], b2[0]]]) T = np.array([[a0[0]], [b0[0]]]) cameraPoints = cameraPoints.T # computing the transformation to the image system return (T + np.dot(R, cameraPoints)).T
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def imageFromCamera(self, points, hide_backpoints=True):\n points = np.array(points)\n # set small z distances to 0\n points[np.abs(points[..., 2]) < 1e-10] = 0\n # transform the points\n with np.errstate(divide='ignore', invalid='ignore'):\n transformed_points = np.array([-points[..., 0] * self.focallength_x_px / points[..., 2] + self.center_x_px,\n points[..., 1] * self.focallength_y_px / points[..., 2] + self.center_y_px]).T\n if hide_backpoints:\n transformed_points[points[..., 2] > 0] = np.nan\n return transformed_points", "def imageFromCamera(self, points, hide_backpoints=True):\n # ensure that the points are provided as an array\n points = np.array(points)\n # set small z distances to 0\n points[np.abs(points[..., 2]) < 1e-10] = 0\n # transform the points\n transformed_points = np.array(\n [-self.focallength_x_px * np.arctan2(-points[..., 0], -points[..., 2]) + self.center_x_px,\n -self.focallength_y_px * np.arctan2(points[..., 1], np.sqrt(\n points[..., 0] ** 2 + points[..., 2] ** 2)) + self.center_y_px]).T\n\n # return the points\n return transformed_points", "def project(points, camera_params):\n # print(camera_params.shape)\n points_proj = rotate(points, camera_params[:, :3])\n points_proj += camera_params[:, 3:6]\n points_proj = -points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = 2360*np.ones(camera_params.shape[0])\n # np.ones()\n # n = np.sum(points_proj**2, axis=1)\n r = 1\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def imageFromCamera(self, points): # pragma: no cover\n # to be overloaded by the child class.\n return None", "def ImageToCamera(self, imagePoints):\n inverse_pars = self.ComputeInverseInnerOrientation()\n imagePoints = imagePoints.T\n\n if imagePoints.size == 2:\n imagePoints = np.reshape(np.array(imagePoints), (np.size(imagePoints), 1))\n\n T = np.array([[inverse_pars[0]], [inverse_pars[1]]])\n R = np.array([[inverse_pars[2], inverse_pars[3]], [inverse_pars[4], inverse_pars[5]]])\n\n return (np.dot(R, imagePoints - T)).T", "def project(points, camera_params):\n points_proj = rotate(points, camera_params[:, :3])\n points_proj += camera_params[:, 3:6]\n points_proj = -points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = camera_params[:, 6]\n k1 = camera_params[:, 7]\n k2 = camera_params[:, 8]\n n = np.sum(points_proj ** 2, axis=1)\n r = 1 + k1 * n + k2 * n ** 2\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def camera_transform(image):\n img = np.zeros((image.shape[0], image.shape[1], 3))\n for y in range(image.shape[0]):\n for x in range(image.shape[1]):\n img[y][x] = (x - 320) / 575.5 * image[y, x], (240 - y) / 575.5 * image[y, x], image[\n y, x]\n return img", "def project(points, camera_params):\n points_proj = rotate(points, camera_params[:, :3])\n points_proj += camera_params[:, 3:6]\n points_proj = points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = camera_params[:, 6]\n k1 = camera_params[:, 7]\n k2 = camera_params[:, 8]\n n = np.sum(points_proj ** 2, axis=1)\n r = 1 + k1 * n + k2 * n ** 2\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def imageFromCamera(self, points, hide_backpoints=True):\n # ensure that the points are provided as an array\n points = np.array(points)\n # set small z distances to 0\n points[np.abs(points[..., 2]) < 1e-10] = 0\n # transform the points\n with np.errstate(divide='ignore', invalid='ignore'):\n transformed_points = np.array(\n [-self.focallength_x_px * np.arctan2(-points[..., 0], -points[..., 2]) + self.center_x_px,\n -self.focallength_y_px * 
points[..., 1] / np.linalg.norm(points[..., [0, 2]],\n axis=-1) + self.center_y_px]).T\n # ensure that points' x values are also nan when the y values are nan\n transformed_points[np.isnan(transformed_points[..., 1])] = np.nan\n # return the points\n return transformed_points", "def project(points, camera_params, theta):\n \"\"\"\n Function takes input of 3d_points, transformations and Convert 3-D points to 2-D by projecting onto images. \n Input:\n points: 3D points in world frame\n camera_params: parameters of camera corrosponding to the point\n theta: Needed For PAL camera to specify the sub camera index for the points\n Output:\n points_proj: 2D reprojected points for 3D points \n\n \"\"\"\n # Convert the 3D points to Camera Frame by rotaion followes by translation\n points_proj1 = rotate(points[:,0:3], camera_params[:, :3])\n points_proj1 += camera_params[:, 3:6]\n # FOR PAL: Converting into the Sub-camera Frame by respective rotation\n thetas = theta * np.pi / 3 \n points_proj = np.copy(points_proj1)\n points_proj[:,0] = points_proj1[:,0]*np.cos(thetas) - points_proj1[:,2]*np.sin(thetas)\n points_proj[:,2] = points_proj1[:,0]*np.sin(thetas) + points_proj1[:,2]*np.cos(thetas)\n # Avoiding Zero error\n for i in range(len(points_proj)):\n if(points_proj[i,2]==0):\n points_proj[i,0] = 0\n points_proj[i,1] = 0\n points_proj[i,2] = 1\n # 2D projection\n points_proj = points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = camera_params[:, 6]\n k1 = camera_params[:, 7]\n k2 = camera_params[:, 8]\n n = np.sum(points_proj**2, axis=1)\n r = 1 + k1 * n + k2 * n**2\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def project_points(points, cam_matrix, trans, rot):\n\n # STEP 1: Transform pointcloud into new reference frame.\n points = np.dot(rot, points) + trans[:, None]\n\n # STEP 2: Project new pointcloud onto image frame using K matrix.\n # gives a 3 x N array of image plane coordinates in homogenous coordinates.\n homo_pixel_coords = np.dot(cam_matrix, points)\n\n # STEP 3: Convert homogenous coordinates to regular 2D coordinates.\n # To do this, you need to divide the first two coordinates of homo_pixel_coords\n # by the third coordinate.\n pixel_coords = homo_pixel_coords[:2] / homo_pixel_coords[2]\n\n # STEP 4: Convert to integers. 
Take the floor of pixel_coords then cast it\n # to an integer type, like numpy.int32\n pixel_coords = np.int32(np.floor(pixel_coords))\n\n return pixel_coords", "def fourPointTransform(image, points):\r\n\r\n topLeft, topRight, bottomLeft, bottomRight = sortFourPoints(points)\r\n\r\n # Determine the maximum width\r\n topWidth = np.sqrt(((topRight[0] - topLeft[0]) ** 2) + ((topRight[1] - topLeft[1]) ** 2))\r\n bottomWidth = np.sqrt(((bottomRight[0] - bottomLeft[0]) ** 2) + ((bottomRight[1] - bottomLeft[1]) ** 2))\r\n width = max(int(topWidth), int(bottomWidth))\r\n\r\n # Determine the maximum height\r\n leftHeight = np.sqrt(((topLeft[0] - bottomLeft[0]) ** 2) + ((topLeft[1] - bottomLeft[1]) ** 2))\r\n rightHeight = np.sqrt(((topRight[0] - bottomRight[0]) ** 2) + ((topRight[1] - bottomRight[1]) ** 2))\r\n height = max(int(leftHeight), int(rightHeight))\r\n\r\n source = np.array([topLeft, topRight, bottomRight, bottomLeft], dtype=\"float32\")\r\n\r\n destination = np.array([[0, 0],\r\n [width - 1, 0],\r\n [width - 1, height - 1],\r\n [0, height - 1]], dtype=\"float32\")\r\n\r\n transformMatrix = cv2.getPerspectiveTransform(source, destination)\r\n\r\n return cv2.warpPerspective(image, transformMatrix, (width, height))", "def project_to_image_plane(self, point_in_world, timestamp):\n\n camera_info = CameraInfo()\n\n fx = self.config['camera_info']['focal_length_x']\n fy = self.config['camera_info']['focal_length_y']\n\n camera_info.width = self.config['camera_info']['image_width']\n camera_info.height = self.config['camera_info']['image_height']\n\n #print(\"fx {}, fy {}\".format(fx, fy))\n\n camera_info.K = np.array([[fx, 0, camera_info.width / 2],\n [0, fy, camera_info.height / 2],\n [0, 0, 1.]], dtype=np.float32)\n camera_info.P = np.array([[fx, 0, camera_info.width / 2, 0],\n [0, fy, camera_info.height / 2, 0],\n [0, 0, 1., 0]])\n camera_info.R = np.array([[1., 0, 0],\n [0, 1., 0],\n [0, 0, 1.]], dtype=np.float32)\n\n camera = PinholeCameraModel()\n camera.fromCameraInfo(camera_info)\n\n #print(\"point_in_world = {}\".format(str(point_in_world)))\n #print(\"camera projection matrix \", camera.P)\n\n # get transform between pose of camera and world frame\n trans = None\n point_in_camera_space = None\n point_in_image = None\n bbox_points_camera_image = []\n\n euler_transforms = (\n math.radians(90), # roll along X to force Y axis 'up'\n math.radians(-90 + -.75), # pitch along Y to force X axis towards 'right', with slight adjustment for camera's 'yaw'\n math.radians(-9) # another roll to orient the camera slightly 'upwards', (camera's 'pitch')\n )\n euler_axes = 'sxyx'\n\n try:\n self.listener.waitForTransform(\"/base_link\",\n \"/world\", timestamp, rospy.Duration(0.1))\n (trans, rot) = self.listener.lookupTransform(\"/base_link\",\n \"/world\", timestamp)\n\n camera_orientation_adj = tf.transformations.quaternion_from_euler(*euler_transforms, axes=euler_axes)\n\n trans_matrix = self.listener.fromTranslationRotation(trans, rot)\n camera_orientation_adj = self.listener.fromTranslationRotation((0, 0, 0), camera_orientation_adj)\n\n #print(\"trans {}, rot {}\".format(trans, rot))\n #print(\"transform matrix {}\".format(trans_matrix))\n\n point = np.array([point_in_world.x, point_in_world.y, point_in_world.z, 1.0])\n\n # this point should match what you'd see from being inside the vehicle looking straight ahead.\n point_in_camera_space = trans_matrix.dot(point)\n\n #print(\"point in camera frame {}\".format(point_in_camera_space))\n\n final_trans_matrix = 
camera_orientation_adj.dot(trans_matrix)\n\n # this point is from the view point of the camera (oriented along the camera's rotation quaternion)\n point_in_camera_space = final_trans_matrix.dot(point)\n\n #print(\"point in camera frame adj {}\".format(point_in_camera_space))\n\n bbox_points = [(point_in_camera_space[0] - 0.5, point_in_camera_space[1] - 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] + 0.5, point_in_camera_space[1] + 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] - 0.5, point_in_camera_space[1] - 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] + 0.5, point_in_camera_space[1] + 1.1, point_in_camera_space[2], 1.0)]\n\n # these points represent the bounding box within the camera's image\n for p in bbox_points:\n bbox_points_camera_image.append(camera.project3dToPixel(p))\n\n # print(\"point in image {}\".format(bbox_points_camera_image))\n\n except (tf.Exception, tf.LookupException, tf.ConnectivityException):\n rospy.logerr(\"Failed to find camera to map transform\")\n\n return bbox_points_camera_image", "def project_points(self, points_3d, camera):\n batch_size = points_3d.shape[0]\n device = points_3d.device\n cam_t = torch.stack([camera[:, 1], camera[:, 2], 2 * self.focal_length / (self.img_res * camera[:, 0] + 1e-09)], dim=-1)\n camera_center = camera.new_zeros([batch_size, 2])\n rot_t = torch.eye(3, device=device, dtype=points_3d.dtype).unsqueeze(0).expand(batch_size, -1, -1)\n joints_2d = perspective_projection(points_3d, rotation=rot_t, translation=cam_t, focal_length=self.focal_length, camera_center=camera_center)\n return joints_2d", "def convert_image_point_to_global_coordinates(points, camera_location):\n # TODO: The camera should take photos which record the camera_location, and scale factors etc.\n # This should be a method on such an image.\n\n # Convert to numpy object for a clean notation\n points = np.array(points)\n camera_location = np.array(camera_location)\n scale_factors = np.array([config.Y_PIXELS_TO_MILLIMETRE_SCALE, config.X_PIXELS_TO_MILLIMETRE_SCALE])\n camera_resolution = np.array(config.CAMERA_RESOLUTION)\n\n # Do the computation\n image_centre = camera_resolution / 2\n return camera_location + scale_factors * (points - image_centre)", "def four_point_transform(self, image, pts):\n rect = []\n for j in range(4):\n rect.append([pts[j * 2], pts[j * 2 + 1]])\n\n rect = np.array(rect, dtype=\"float32\")\n (tl, tr, br, bl) = rect\n # compute the width of the new image, which will be the\n # maximum distance between bottom-right and bottom-left\n # x-coordiates or the top-right and top-left x-coordinates\n widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n maxWidth = max(int(widthA), int(widthB))\n # compute the height of the new image, which will be the\n # maximum distance between the top-right and bottom-right\n # y-coordinates or the top-left and bottom-left y-coordinates\n heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n maxHeight = max(int(heightA), int(heightB))\n # now that we have the dimensions of the new image, construct\n # the set of destination points to obtain a \"birds eye view\",\n # (i.e. 
top-down view) of the image, again specifying points\n # in the top-left, top-right, bottom-right, and bottom-left\n # order\n dst = np.array([\n [0, 0],\n [maxWidth - 1, 0],\n [maxWidth - 1, maxHeight - 1],\n [0, maxHeight - 1]], dtype=\"float32\")\n # compute the perspective transform matrix and then apply it\n M = cv2.getPerspectiveTransform(rect, dst)\n warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))\n # return the warped image\n return warped", "def imageTransform(self):\n ims = self.imageShape\n acs = self.activeShape\n dx = self.colVector\n dy = self.rowVector\n\n p0 = self.activeOrigin\n p1 = p0 + acs[2] * dx\n p2 = p0 + acs[1] * dy\n\n # print p0, p1, p2\n # print acs, dx, dy\n\n localPts = list(map(pg.Vector, [[0,0], [ims[2],0], [0,ims[1]], [0,0,1]])) # w and h of data of image in pixels.\n globalPts = list(map(pg.Vector, [p0, p1, p2, [0,0,1]]))\n m = pg.solve3DTransform(localPts, globalPts)\n m[:,2] = m[:,3]\n m[2] = m[3]\n m[2,2] = 1\n tr = Qt.QTransform(*m[:3,:3].transpose().reshape(9))\n return tr", "def project_onto_image(self, points_in_camera_frame: ARRAY_LIKE, image: int = 0,\n temperature: Real = 0) -> np.ndarray:\n\n _, __, picture_locations = self.get_projections(points_in_camera_frame, image, temperature=temperature)\n\n return picture_locations", "def apply_transformation(self, points):\n assert (points.shape[0] == 3)\n n = points.shape[1]\n points_ = np.vstack((points, np.ones((1, n))))\n points_trans_ = np.matmul(self.pose_mat, points_)\n points_transformed = np.true_divide(points_trans_[:3, :], points_trans_[[-1], :])\n return points_transformed", "def _point_scale2img(points, _H, _W):\n # with tf.variable_scope(\"_point_scale2img\", reuse=False):\n points = points * tf.constant([_H - 1, _W - 1], \"float32\")\n return points", "def four_point_transform(image, pts):\n\n\tmax_x, max_y = np.max(pts[:, 0]).astype(np.int32), np.max(pts[:, 1]).astype(np.int32)\n\n\tdst = np.array([\n\t\t[0, 0],\n\t\t[image.shape[1] - 1, 0],\n\t\t[image.shape[1] - 1, image.shape[0] - 1],\n\t\t[0, image.shape[0] - 1]], dtype=\"float32\")\n\n\twarped = cv2.warpPerspective(image, cv2.getPerspectiveTransform(dst, pts), (max_x, max_y))\n\n\treturn warped", "def project_points(X, K, R, T, distortion_flag=False, distortion_params=None):\n # Project points from 3d world coordinates to 2d image coordinates\n\n #get projection matrix\n pmatrix = projection_matrix(R, T, K)\n\n #add 4th component to points\n ones = np.ones([1,len(X[0])])\n xones=np.row_stack((X,ones))\n\n #calculate pixel coordinates\n X_camera = pmatrix.dot(xones)\n\n return X_camera", "def converte_coord(valor):\n\n pts1 = ([0,0],[24,0],[24,44],[0,44])\n pts1 = np.asarray(pts1, dtype = np.float32)\n pts2 = np.float32([[0,0],[100,0], [100,100], [0,100]])\n\n M = cv.getPerspectiveTransform(pts1,pts2)\n img2 = cv.warpPerspective(valor,M,(100,100))\n return img2", "def transform(self, x, y):\n # return self.transform_2D(x, y)\n return self.transform_perspective(x, y)", "def get_perspective_transform(points_src: Tensor, points_dst: Tensor) -> Tensor:\n KORNIA_CHECK_SHAPE(points_src, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK_SHAPE(points_dst, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK(points_src.shape == points_dst.shape, \"Source data shape must match Destination data shape.\")\n KORNIA_CHECK(points_src.dtype == points_dst.dtype, \"Source data type must match Destination data type.\")\n\n # we build matrix A by using only 4 point correspondence. 
The linear\n # system is solved with the least square method, so here\n # we could even pass more correspondence\n\n # create the lhs tensor with shape # Bx8x8\n B: int = points_src.shape[0] # batch_size\n\n A = torch.empty(B, 8, 8, device=points_src.device, dtype=points_src.dtype)\n\n # we need to perform in batch\n _zeros = zeros(B, device=points_src.device, dtype=points_src.dtype)\n _ones = torch.ones(B, device=points_src.device, dtype=points_src.dtype)\n\n for i in range(4):\n x1, y1 = points_src[..., i, 0], points_src[..., i, 1] # Bx4\n x2, y2 = points_dst[..., i, 0], points_dst[..., i, 1] # Bx4\n\n A[:, 2 * i] = stack([x1, y1, _ones, _zeros, _zeros, _zeros, -x1 * x2, -y1 * x2], -1)\n A[:, 2 * i + 1] = stack([_zeros, _zeros, _zeros, x1, y1, _ones, -x1 * y2, -y1 * y2], -1)\n\n # the rhs tensor\n b = points_dst.view(-1, 8, 1)\n\n # solve the system Ax = b\n X: Tensor = _torch_solve_cast(A, b)\n\n # create variable to return the Bx3x3 transform\n M = torch.empty(B, 9, device=points_src.device, dtype=points_src.dtype)\n M[..., :8] = X[..., 0] # Bx8\n M[..., -1].fill_(1)\n\n return M.view(-1, 3, 3) # Bx3x3", "def get_projections(self, points_in_camera_frame: ARRAY_LIKE,\n image: int = 0, temperature: Real = 0) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n\n # ensure the input is an array\n points_in_camera_frame = np.asarray(points_in_camera_frame)\n\n # apply misalignment to the points\n if self.estimate_multiple_misalignments:\n if np.any(self.misalignment[image]): # optimization to avoid matrix multiplication\n points_in_camera_frame = rotvec_to_rotmat(self.misalignment[image]).squeeze() @ \\\n points_in_camera_frame\n\n else:\n if np.any(self.misalignment): # optimization to avoid matrix multiplication\n points_in_camera_frame = rotvec_to_rotmat(self.misalignment).squeeze() @ points_in_camera_frame\n\n # get the unitless image plane location\n pinhole_locations = points_in_camera_frame[:2] / points_in_camera_frame[2]\n\n # get the distorted image plane location\n image_locations = self.apply_distortion(pinhole_locations)\n\n # add the temperature based scaling\n image_locations *= self.get_temperature_scale(temperature)\n\n # get the pixel locations of the points, need to mess with transposes due to numpy broadcasting rules\n picture_locations = ((self.intrinsic_matrix[:, :2] @ image_locations).T + self.intrinsic_matrix[:, 2]).T\n\n return pinhole_locations, image_locations, picture_locations", "def projective_transform(self, x):\n\n x = np.asarray(x)\n # Assume no intensity column\n x0, y0, z0 = x\n\n # Camera coors to pixel coors\n u = ((x0 / z0) * self.f) + (self.sensor_size[0] // 2)\n v = ((y0 / z0) * self.f) + (self.sensor_size[1] // 2)\n\n u_min = np.min(u)\n v_min = np.min(v)\n\n n = len(u)\n u_list = []\n v_list = []\n if self.error_on_oob:\n for i in range(n):\n if (u[i] >= u_min and u[i] <= self.sensor_size[0] and v[i] >= v_min and v[i] <= self.sensor_size[1]):\n u_list.append(u[i])\n v_list.append(v[i])\n else:\n raise OutOfSensorBoundsError(\"Projected coordinate was outside the sensor\")\n else:\n for i in range(n):\n u_list.append(u[i])\n v_list.append(v[i])\n\n u = np.asarray(u_list)\n v = np.asarray(v_list)\n\n return np.vstack((u, v))", "def project_point_cloud_to_orthographic_depth_image(pts, campos, viewdir, up, lrbt, im_hw):\n Rt = transforms.lookat_matrix(campos, campos + viewdir, up=up)\n transformed = Rt.dot(np.hstack((pts, np.ones([pts.shape[0], 1]))).T).T\n\n x = (transformed[:, 0] - lrbt[0]) / (lrbt[1] - lrbt[0]) * (im_hw[1])\n y = (transformed[:, 1] - 
lrbt[2]) / (lrbt[3] - lrbt[2]) * (im_hw[0])\n d = transformed[:, 2]\n\n ret = np.full(im_hw, fill_value=np.nan)\n for i in range(x.shape[0]):\n yi = im_hw[0] - int(round(y[i]))\n xi = int(round(x[i]))\n if yi < 0 or yi >= im_hw[0] or xi < 0 or xi >= im_hw[1]:\n continue\n if np.isnan(ret[yi, xi]):\n ret[yi, xi] = d[i]\n else:\n ret[yi, xi] = min(ret[yi, xi], d[i])\n\n return ret", "def GroundToImage(self, groundPoints):\n X0 = float(self.exteriorOrientationParameters[0])\n Y0 = float(self.exteriorOrientationParameters[1])\n Z0 = float(self.exteriorOrientationParameters[2])\n\n xp = float(self.camera.principalPoint[0])\n yp = float(self.camera.principalPoint[1])\n\n R = self.rotationMatrix\n r11 = float(R[0, 0])\n r12 = float(R[0, 1])\n r13 = float(R[0, 2])\n r21 = float(R[1, 0])\n r22 = float(R[1, 1])\n r23 = float(R[1, 2])\n r31 = float(R[2, 0])\n r32 = float(R[2, 1])\n r33 = float(R[2, 2])\n\n f = self.camera.focalLength\n\n camPoints = []\n\n for i in range(groundPoints.shape[0]):\n x = xp - (f) * (((r11 * (groundPoints[i, 0] - X0) + r21 * (groundPoints[i, 1] - Y0) + r31 * (\n groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (\n groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))\n y = yp - (f) * (((r12 * (groundPoints[i, 0] - X0) + r22 * (groundPoints[i, 1] - Y0) + r32 * (\n groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (\n groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))\n\n camPoints.append([x, y])\n\n # return self.CameraToImage(np.array(camPoints))\n return (np.array(camPoints))", "def fun(params,n_cameras,n_points,camera_indices,point_indices,points_3d , points_2d):\n camera_params = params[:n_cameras * 6].reshape((n_cameras, 6))\n # points_3d = points_3d.T\n # points_3d = params[n_cameras * 7:].reshape((n_points, 3))\n # print(point_indices)\n points_proj = project(points_3d[point_indices], camera_params[camera_indices])\n return (points_proj - points_2d).ravel()" ]
[ "0.75200397", "0.7221418", "0.72199947", "0.71426326", "0.71409965", "0.7138931", "0.7127518", "0.7100937", "0.70733917", "0.6988374", "0.69729286", "0.69044685", "0.6830623", "0.6829222", "0.6822007", "0.6778279", "0.67171186", "0.6710748", "0.66177326", "0.661222", "0.655764", "0.6540855", "0.6531074", "0.6498859", "0.6474865", "0.6451168", "0.6437138", "0.6397588", "0.6393258", "0.6328641" ]
0.73480624
1
Compute exterior orientation parameters. This function can be used in conjunction with ``self.__ComputeDesignMatrix(groundPoints)`` and ``self.__ComputeObservationVector(imagePoints)``
def ComputeExteriorOrientation(self, imagePoints, groundPoints, epsilon):
        # cameraPoints = self.ImageToCamera(imagePoints)
        cameraPoints = imagePoints
        self.__ComputeApproximateVals(cameraPoints, groundPoints)
        l0 = self.__ComputeObservationVector(groundPoints.T)
        l0 = np.reshape(l0, (-1, 1))
        l = cameraPoints.reshape(np.size(cameraPoints), 1) - l0
        A = self.__ComputeDesignMatrix(groundPoints.T)

        N = np.dot(A.T, A)
        u = np.dot(A.T, l)
        deltaX = np.dot(la.inv(N), u)

        # update orientation pars
        self.__exteriorOrientationParameters = np.add(self.__exteriorOrientationParameters, np.reshape(deltaX, 6))

        while la.norm(deltaX) > epsilon:
            l0 = self.__ComputeObservationVector(groundPoints.T)
            l0 = np.reshape(l0, (-1, 1))
            l = cameraPoints.reshape(np.size(cameraPoints), 1) - l0
            A = self.__ComputeDesignMatrix(groundPoints.T)
            N = np.dot(A.T, A)
            u = np.dot(A.T, l)
            deltaX = np.dot(la.inv(N), u)
            # update orientation pars
            self.__exteriorOrientationParameters = np.add(self.__exteriorOrientationParameters, np.reshape(deltaX, 6))

        # compute residuals
        l_a = np.reshape(self.__ComputeObservationVector(groundPoints.T), (-1, 1))
        v = l_a - cameraPoints.reshape(np.size(cameraPoints), 1)
        if (np.size(A, 0) - np.size(deltaX)) != 0:
            sig = np.dot(v.T, v) / (np.size(A, 0) - np.size(deltaX))
            sigmaX = sig[0] * la.inv(N)
        else:
            sigmaX = None

        return [self.exteriorOrientationParameters, sigmaX, v]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exteriorOrientationParameters(self):\n return self.__exteriorOrientationParameters", "def ComputeInnerOrientation(self, imagePoints):\n # implementing observation vectors\n imagePoints = imagePoints.reshape(np.size(imagePoints), 1)\n\n fMarks = self.camera.fiducialMarks.reshape(np.size(self.camera.fiducialMarks), 1)\n\n n = int(len(imagePoints)) # number of observations\n u = 6 # 6 orientation parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(imagePoints)):\n if i % 2 == 0:\n A[i, 0] = 1;\n A[i, 1] = 0;\n A[i, 2] = fMarks[j];\n A[i, 3] = fMarks[j + 1];\n A[i, 4] = 0\n A[i, 5] = 0\n else:\n A[i, 0] = 0;\n A[i, 1] = 1;\n A[i, 2] = 0;\n A[i, 3] = 0;\n A[i, 4] = fMarks[j];\n A[i, 5] = fMarks[j + 1]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), imagePoints))\n v = np.dot(A, X) - imagePoints\n\n adjustment_results = {\"params\": X, \"residuals\": v, \"N\": np.dot(np.transpose(A), A)}\n\n self.__innerOrientationParameters = X # updating the inner orientation params\n\n return adjustment_results", "def ComputeExteriorOrientation_RzRyRz(self, imagePoints, groundPoints, epsilon):\n # cameraPoints = self.ImageToCamera(imagePoints)\n cameraPoints = imagePoints\n self.exteriorOrientationParameters[0:3] = np.dot(self.rotationMatrix_RzRyRz, self.exteriorOrientationParameters[0:3])\n self.exteriorOrientationParameters = np.add(self.exteriorOrientationParameters, np.random.normal(0, 0.01, self.exteriorOrientationParameters.shape))\n l0 = self.__ComputeObservationVector_RzRyRz(groundPoints.T)\n l0 = np.reshape(l0, (-1, 1))\n l = cameraPoints.reshape(np.size(cameraPoints), 1) - l0\n A = self.__ComputeDesignMatrix_RzRyRz(groundPoints.T)\n\n N = np.dot(A.T, A)\n u = np.dot(A.T, l)\n deltaX = np.dot(la.inv(N), u)\n\n # update orientation pars\n self.__exteriorOrientationParameters = np.add(self.__exteriorOrientationParameters, np.reshape(deltaX, 6))\n\n while la.norm(deltaX) > epsilon:\n l0 = self.__ComputeObservationVector_RzRyRz(groundPoints.T)\n l0 = np.reshape(l0, (-1, 1))\n l = cameraPoints.reshape(np.size(cameraPoints), 1) - l0\n A = self.__ComputeDesignMatrix_RzRyRz(groundPoints.T)\n N = np.dot(A.T, A)\n u = np.dot(A.T, l)\n deltaX = np.dot(la.inv(N), u)\n # update orientation pars\n self.__exteriorOrientationParameters = np.add(self.__exteriorOrientationParameters, np.reshape(deltaX, 6))\n\n # compute residuals\n l_a = np.reshape(self.__ComputeObservationVector_RzRyRz(groundPoints.T), (-1, 1))\n v = l_a - cameraPoints.reshape(np.size(cameraPoints), 1)\n if (np.size(A, 0) - np.size(deltaX)) != 0:\n sig = np.dot(v.T, v) / (np.size(A, 0) - np.size(deltaX))\n sigmaX = sig[0] * la.inv(N)\n else:\n sigmaX = None\n\n return [self.exteriorOrientationParameters, sigmaX, v]", "def ComputeGeometricParameters(self):\n # extracting inner orientation params\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n # computing algebric params\n tx = a0;\n ty = b0\n theta = np.arctan(b1 / b2)\n gamma = np.arctan((a1 * np.sin(theta) + a2 * np.cos(theta)) / (b1 * np.sin(theta) + b2 * np.cos(theta)))\n sx = a1 * np.cos(theta) - a2 * np.sin(theta)\n sy = (a1 * np.sin(theta) + a2 * np.cos(theta)) / np.sin(gamma)\n\n return {\"translationX\": tx, \"translationY\": ty, \"rotationAngle\": np.rad2deg(theta), \"scaleFactorX\": sx,\n \"scaleFactorY\": sy, 
\"shearAngle\": np.rad2deg(gamma)}", "def __ComputeDesignMatrix(self, groundPoints):\n # initialization for readability\n omega = self.exteriorOrientationParameters[3]\n phi = self.exteriorOrientationParameters[4]\n kappa = self.exteriorOrientationParameters[5]\n\n # Coordinates subtraction\n dX = groundPoints[:, 0] - self.exteriorOrientationParameters[0]\n dY = groundPoints[:, 1] - self.exteriorOrientationParameters[1]\n dZ = groundPoints[:, 2] - self.exteriorOrientationParameters[2]\n dXYZ = np.vstack([dX, dY, dZ])\n\n rotationMatrixT = self.rotationMatrix.T\n rotatedG = rotationMatrixT.dot(dXYZ)\n rT1g = rotatedG[0, :]\n rT2g = rotatedG[1, :]\n rT3g = rotatedG[2, :]\n\n focalBySqauredRT3g = self.camera.focalLength / rT3g ** 2\n\n dxdg = rotationMatrixT[0, :][None, :] * rT3g[:, None] - rT1g[:, None] * rotationMatrixT[2, :][None, :]\n dydg = rotationMatrixT[1, :][None, :] * rT3g[:, None] - rT2g[:, None] * rotationMatrixT[2, :][None, :]\n\n dgdX0 = np.array([-1, 0, 0], 'f')\n dgdY0 = np.array([0, -1, 0], 'f')\n dgdZ0 = np.array([0, 0, -1], 'f')\n\n # Derivatives with respect to X0\n dxdX0 = -focalBySqauredRT3g * np.dot(dxdg, dgdX0)\n dydX0 = -focalBySqauredRT3g * np.dot(dydg, dgdX0)\n\n # Derivatives with respect to Y0\n dxdY0 = -focalBySqauredRT3g * np.dot(dxdg, dgdY0)\n dydY0 = -focalBySqauredRT3g * np.dot(dydg, dgdY0)\n\n # Derivatives with respect to Z0\n dxdZ0 = -focalBySqauredRT3g * np.dot(dxdg, dgdZ0)\n dydZ0 = -focalBySqauredRT3g * np.dot(dydg, dgdZ0)\n\n dRTdOmega = Compute3DRotationDerivativeMatrix(omega, phi, kappa, 'omega').T\n dRTdPhi = Compute3DRotationDerivativeMatrix(omega, phi, kappa, 'phi').T\n dRTdKappa = Compute3DRotationDerivativeMatrix(omega, phi, kappa, 'kappa').T\n\n gRT3g = dXYZ * rT3g\n\n # Derivatives with respect to Omega\n dxdOmega = -focalBySqauredRT3g * (dRTdOmega[0, :][None, :].dot(gRT3g) -\n rT1g * (dRTdOmega[2, :][None, :].dot(dXYZ)))[0]\n\n dydOmega = -focalBySqauredRT3g * (dRTdOmega[1, :][None, :].dot(gRT3g) -\n rT2g * (dRTdOmega[2, :][None, :].dot(dXYZ)))[0]\n\n # Derivatives with respect to Phi\n dxdPhi = -focalBySqauredRT3g * (dRTdPhi[0, :][None, :].dot(gRT3g) -\n rT1g * (dRTdPhi[2, :][None, :].dot(dXYZ)))[0]\n\n dydPhi = -focalBySqauredRT3g * (dRTdPhi[1, :][None, :].dot(gRT3g) -\n rT2g * (dRTdPhi[2, :][None, :].dot(dXYZ)))[0]\n\n # Derivatives with respect to Kappa\n dxdKappa = -focalBySqauredRT3g * (dRTdKappa[0, :][None, :].dot(gRT3g) -\n rT1g * (dRTdKappa[2, :][None, :].dot(dXYZ)))[0]\n\n dydKappa = -focalBySqauredRT3g * (dRTdKappa[1, :][None, :].dot(gRT3g) -\n rT2g * (dRTdKappa[2, :][None, :].dot(dXYZ)))[0]\n\n # all derivatives of x and y\n dd = np.array([np.vstack([dxdX0, dxdY0, dxdZ0, dxdOmega, dxdPhi, dxdKappa]).T,\n np.vstack([dydX0, dydY0, dydZ0, dydOmega, dydPhi, dydKappa]).T])\n\n a = np.zeros((2 * dd[0].shape[0], 6))\n a[0::2] = dd[0]\n a[1::2] = dd[1]\n\n return a", "def __ComputeDesignMatrix_RzRyRz(self, groundPoints):\n # initialization for readability\n azimuth = self.exteriorOrientationParameters[3]\n phi = self.exteriorOrientationParameters[4]\n kappa = self.exteriorOrientationParameters[5]\n\n # Coordinates subtraction\n dX = groundPoints[:, 0] - self.exteriorOrientationParameters[0]\n dY = groundPoints[:, 1] - self.exteriorOrientationParameters[1]\n dZ = groundPoints[:, 2] - self.exteriorOrientationParameters[2]\n dXYZ = np.vstack([dX, dY, dZ])\n\n rotationMatrixT = self.rotationMatrix_RzRyRz.T\n rotatedG = rotationMatrixT.dot(dXYZ)\n rT1g = rotatedG[0, :]\n rT2g = rotatedG[1, :]\n rT3g = rotatedG[2, :]\n\n focalBySqauredRT3g = 
self.camera.focalLength / rT3g ** 2\n\n dxdg = rotationMatrixT[0, :][None, :] * rT3g[:, None] - rT1g[:, None] * rotationMatrixT[2, :][None, :]\n dydg = rotationMatrixT[1, :][None, :] * rT3g[:, None] - rT2g[:, None] * rotationMatrixT[2, :][None, :]\n\n dgdX0 = np.array([-1, 0, 0], 'f')\n dgdY0 = np.array([0, -1, 0], 'f')\n dgdZ0 = np.array([0, 0, -1], 'f')\n\n # Derivatives with respect to X0\n dxdX0 = -focalBySqauredRT3g * np.dot(dxdg, dgdX0)\n dydX0 = -focalBySqauredRT3g * np.dot(dydg, dgdX0)\n\n # Derivatives with respect to Y0\n dxdY0 = -focalBySqauredRT3g * np.dot(dxdg, dgdY0)\n dydY0 = -focalBySqauredRT3g * np.dot(dydg, dgdY0)\n\n # Derivatives with respect to Z0\n dxdZ0 = -focalBySqauredRT3g * np.dot(dxdg, dgdZ0)\n dydZ0 = -focalBySqauredRT3g * np.dot(dydg, dgdZ0)\n\n dRTdOmega = Compute3DRotationDerivativeMatrix_RzRyRz(azimuth, phi, kappa, 'azimuth').T\n dRTdPhi = Compute3DRotationDerivativeMatrix_RzRyRz(azimuth, phi, kappa, 'phi').T\n dRTdKappa = Compute3DRotationDerivativeMatrix_RzRyRz(azimuth, phi, kappa, 'kappa').T\n\n gRT3g = dXYZ * rT3g\n\n # Derivatives with respect to Omega\n dxdOmega = -focalBySqauredRT3g * (dRTdOmega[0, :][None, :].dot(gRT3g) -\n rT1g * (dRTdOmega[2, :][None, :].dot(dXYZ)))[0]\n\n dydOmega = -focalBySqauredRT3g * (dRTdOmega[1, :][None, :].dot(gRT3g) -\n rT2g * (dRTdOmega[2, :][None, :].dot(dXYZ)))[0]\n\n # Derivatives with respect to Phi\n dxdPhi = -focalBySqauredRT3g * (dRTdPhi[0, :][None, :].dot(gRT3g) -\n rT1g * (dRTdPhi[2, :][None, :].dot(dXYZ)))[0]\n\n dydPhi = -focalBySqauredRT3g * (dRTdPhi[1, :][None, :].dot(gRT3g) -\n rT2g * (dRTdPhi[2, :][None, :].dot(dXYZ)))[0]\n\n # Derivatives with respect to Kappa\n dxdKappa = -focalBySqauredRT3g * (dRTdKappa[0, :][None, :].dot(gRT3g) -\n rT1g * (dRTdKappa[2, :][None, :].dot(dXYZ)))[0]\n\n dydKappa = -focalBySqauredRT3g * (dRTdKappa[1, :][None, :].dot(gRT3g) -\n rT2g * (dRTdKappa[2, :][None, :].dot(dXYZ)))[0]\n\n # all derivatives of x and y\n dd = np.array([np.vstack([dxdX0, dxdY0, dxdZ0, dxdOmega, dxdPhi, dxdKappa]).T,\n np.vstack([dydX0, dydY0, dydZ0, dydOmega, dydPhi, dydKappa]).T])\n\n a = np.zeros((2 * dd[0].shape[0], 6))\n a[0::2] = dd[0]\n a[1::2] = dd[1]\n\n return a", "def get_orientation(self):\n # Only work with rotation around x by now\n n0 = DEFAULT_N0\n n1 = DEFAULT_N1\n if self.rotation[2] != 0.0:\n n0 = self.rotate_z(n0)\n n1 = self.rotate_z(n1)\n return n0, n1, DEFAULT_N2", "def _exteriorFaces(self):\n XYids = self._XYFaceIDs\n XZids = self._XZFaceIDs\n YZids = self._YZFaceIDs\n\n exteriorIDs = numerix.concatenate((numerix.ravel(XYids[..., 0].swapaxes(0, 1)),\n numerix.ravel(XYids[..., -1].swapaxes(0, 1)),\n numerix.ravel(XZids[:, 0,:]),\n numerix.ravel(XZids[:, -1,:]),\n numerix.ravel(YZids[ 0, ...]),\n numerix.ravel(YZids[-1, ...])))\n\n from fipy.variables.faceVariable import FaceVariable\n exteriorFaces = FaceVariable(mesh=self, value=False)\n exteriorFaces[exteriorIDs] = True\n return exteriorFaces", "def __ComputeObservationVector(self, groundPoints):\n\n n = groundPoints.shape[0] # number of points\n\n # Coordinates subtraction\n dX = groundPoints[:, 0] - self.exteriorOrientationParameters[0]\n dY = groundPoints[:, 1] - self.exteriorOrientationParameters[1]\n dZ = groundPoints[:, 2] - self.exteriorOrientationParameters[2]\n dXYZ = np.vstack([dX, dY, dZ])\n rotated_XYZ = np.dot(self.rotationMatrix.T, dXYZ).T\n\n l0 = np.empty(n * 2)\n\n # Computation of the observation vector based on approximate exterior orientation parameters:\n l0[::2] = -self.camera.focalLength * rotated_XYZ[:, 0] / 
rotated_XYZ[:, 2]\n l0[1::2] = -self.camera.focalLength * rotated_XYZ[:, 1] / rotated_XYZ[:, 2]\n\n return l0", "def __ComputeApproximateVals(self, cameraPoints, groundPoints):\n\n # Find approximate values\n cameraPoints = cameraPoints.reshape(np.size(cameraPoints), 1)\n groundPointsXY = groundPoints[0:2, :].T\n groundPointsXY = groundPointsXY.reshape(np.size(groundPointsXY), 1)\n groundPointsZ = groundPoints[2, :].T\n\n n = int(len(cameraPoints)) # number of observations\n u = 4 # 4 conform parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(cameraPoints)):\n if i % 2 == 0:\n A[i, 0] = 1\n A[i, 1] = 0\n A[i, 2] = cameraPoints[j]\n A[i, 3] = cameraPoints[j + 1]\n else:\n A[i, 0] = 0\n A[i, 1] = 1\n A[i, 2] = cameraPoints[j + 1]\n A[i, 3] = -cameraPoints[j]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), groundPointsXY))\n\n # now we can compute the rest of the params\n X0 = X[0]\n Y0 = X[1]\n kappa = np.arctan2(-X[3], X[2])\n lam = np.sqrt(X[2] ** 2 + X[3] ** 2)\n Z0 = np.average(groundPointsZ) + (lam) * self.camera.focalLength\n\n adjustment_results = {\"X0\": X0[0], \"Y0\": Y0[0], \"Z0\": Z0[0], \"omega\": 0, \"phi\": 0,\n \"kappa\": np.rad2deg(kappa[0])}\n\n self.__exteriorOrientationParameters = np.array(\n [X0[0], Y0[0], Z0[0], 0, 0, kappa[0]]).T # updating the exterior orientation params\n # self.__exteriorOrientationParameters = np.array([202225, 742447, 657.81, 0, 0, kappa[0]]).T\n #return adjustment_results", "def _save_parameters(self):\n\n # eigenvectors are the coefficients of an ellipse in general form\n # a*x^2 + 2*b*x*y + c*y^2 + 2*d*x + 2*f*y + g = 0 [eqn. 15) from (**) or (***)\n a = self.coef[0, 0]\n b = self.coef[1, 0]/2.\n c = self.coef[2, 0]\n d = self.coef[3, 0]/2.\n f = self.coef[4, 0]/2.\n g = self.coef[5, 0]\n\n # finding center of ellipse [eqn.19 and 20] from (**)\n x0 = (c*d-b*f)/(b**2.-a*c)\n y0 = (a*f-b*d)/(b**2.-a*c)\n\n # Find the semi-axes lengths [eqn. 21 and 22] from (**)\n numerator = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)\n denominator1 = (b*b-a*c) * \\\n ((c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n denominator2 = (b*b-a*c) * \\\n ((a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n width = np.sqrt(numerator/denominator1)\n height = np.sqrt(numerator/denominator2)\n\n # angle of counterclockwise rotation of major-axis of ellipse to x-axis [eqn. 23] from (**)\n # or [eqn. 
26] from (***).\n phi = .5*np.arctan((2.*b)/(a-c))\n\n self._center = [x0, y0]\n self._width = width\n self._height = height\n self._phi = phi", "def exterior_der(self):\n from sage.calculus.functional import diff\n from utilities import format_unop_txt, format_unop_latex\n from sage.tensor.modules.comp import CompFullyAntiSym\n from vectorframe import CoordFrame\n if self._exterior_derivative is None:\n # A new computation is necessary:\n fmodule = self._fmodule # shortcut\n rname = format_unop_txt('d', self._name)\n rlname = format_unop_latex(r'\\mathrm{d}', self._latex_name)\n self._exterior_derivative = DiffFormParal(fmodule, \n self._tensor_rank+1, \n name=rname, \n latex_name=rlname)\n # 1/ List of all coordinate frames in which the components of self\n # are known\n coord_frames = []\n for frame in self._components:\n if isinstance(frame, CoordFrame):\n coord_frames.append(frame)\n if coord_frames == []:\n # A coordinate frame is searched, at the price of a change of\n # frame, priveleging the frame of the domain's default chart\n dom = self._domain\n def_coordf = dom._def_chart._frame\n for frame in self._components:\n if (frame, def_coordf) in dom._frame_changes:\n self.comp(def_coordf, from_basis=frame)\n coord_frames = [def_coordf]\n break\n if coord_frames == []:\n for chart in dom._atlas:\n if chart != dom._def_chart: # the case def_chart is treated above\n coordf = chart._frame\n for frame in self._components:\n if (frame, coordf) in dom._frame_changes:\n self.comp(coordf, from_basis=frame)\n coord_frames[coordf]\n break\n if coord_frames != []:\n break \n # 2/ The computation:\n for frame in coord_frames:\n chart = frame._chart\n sc = self._components[frame]\n dc = CompFullyAntiSym(fmodule._ring, frame, \n self._tensor_rank+1, \n start_index=fmodule._sindex,\n output_formatter=fmodule._output_formatter)\n for ind, val in sc._comp.iteritems():\n for i in fmodule.irange():\n ind_d = (i,) + ind\n if len(ind_d) == len(set(ind_d)): \n # all indices are different\n dc[[ind_d]] += \\\n val.function_chart(chart).diff(i).scalar_field()\n self._exterior_derivative._components[frame] = dc\n return self._exterior_derivative", "def define_orientation_matrix(self):\n from lmfit import Parameters\n p = Parameters()\n for i in range(3):\n for j in range(3):\n p.add('U%d%d' % (i, j), self.Umat[i, j])\n self.init_p = self.Umat\n return p", "def __ComputeApproximateVals_RzRyRz(self, cameraPoints, groundPoints):\n\n # Find approximate values\n cameraPoints = cameraPoints.reshape(np.size(cameraPoints), 1)\n groundPointsXY = groundPoints[0:2, :].T\n groundPointsXY = groundPointsXY.reshape(np.size(groundPointsXY), 1)\n groundPointsZ = groundPoints[2, :].T\n\n n = int(len(cameraPoints)) # number of observations\n u = 4 # 4 conform parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(cameraPoints)):\n if i % 2 == 0:\n A[i, 0] = 1\n A[i, 1] = 0\n A[i, 2] = cameraPoints[j]\n A[i, 3] = cameraPoints[j + 1]\n else:\n A[i, 0] = 0\n A[i, 1] = 1\n A[i, 2] = cameraPoints[j + 1]\n A[i, 3] = -cameraPoints[j]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), groundPointsXY))\n\n # now we can compute the rest of the params\n X0 = X[0]\n Y0 = X[1]\n kappa = np.arctan2(-X[3], X[2])\n lam = np.sqrt(X[2] ** 2 + X[3] ** 2)\n Z0 = np.average(groundPointsZ) + (lam) * self.camera.focalLength\n\n adjustment_results = {\"X0\": X0[0], \"Y0\": Y0[0], \"Z0\": Z0[0], \"omega\": 0, \"phi\": 0,\n \"kappa\": np.rad2deg(kappa[0])}\n\n 
self.__exteriorOrientationParameters = np.array(\n [X0[0], Y0[0], Z0[0], 0.2, 0.2, kappa[0]]).T # updating the exterior orientation params\n # self.__exteriorOrientationParameters = np.array([202225, 742447, 657.81, 0, 0, kappa[0]]).T\n #return adjustment_results", "def _get_orientations(self):\n for atom in self.invarioms:\n atom.get_orientation()", "def orientation(xp, yp, xq, yq, xr, yr):\n cross = (xq-xp)*(yr-yp) - (xr-xp)*(yq-yp)\n dot = (xq-xp)*(xr-xp) + (yr-yp)*(yq-yp)\n if cross < 0:\n return -1\n elif cross > 0:\n return 1\n elif dot > 0:\n return 0\n else:\n return math.pi", "def __ComputeObservationVector_RzRyRz(self, groundPoints):\n\n n = groundPoints.shape[0] # number of points\n\n # Coordinates subtraction\n dX = groundPoints[:, 0] - self.exteriorOrientationParameters[0]\n dY = groundPoints[:, 1] - self.exteriorOrientationParameters[1]\n dZ = groundPoints[:, 2] - self.exteriorOrientationParameters[2]\n dXYZ = np.vstack([dX, dY, dZ])\n rotated_XYZ = np.dot(self.rotationMatrix_RzRyRz.T, dXYZ).T\n\n l0 = np.empty(n * 2)\n\n # Computation of the observation vector based on approximate exterior orientation parameters:\n l0[::2] = -self.camera.focalLength * rotated_XYZ[:, 0] / rotated_XYZ[:, 2]\n l0[1::2] = -self.camera.focalLength * rotated_XYZ[:, 1] / rotated_XYZ[:, 2]\n\n return l0", "def Orientation(self, *args):\n return _Adaptor3d.Adaptor3d_TopolTool_Orientation(self, *args)", "def ComputeInverseInnerOrientation(self):\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n mat = np.array([[a1[0], a2[0]], [b1[0], b2[0]]])\n mat = la.inv(mat)\n\n return np.array([a0[0], b0[0], mat[0, 0], mat[0, 1], mat[1, 0], mat[1, 1]]).T", "def getAxisAndAngleDegres(self):\n theta = numpy.arccos(self.real)\n angle = 360 * theta / numpy.pi\n xyz = self.pure / numpy.sin(theta)\n return xyz, angle", "def yy(self):\n return self.exterior[:, 1]", "def exterior_der(self):\n from utilities import format_unop_txt, format_unop_latex\n if self._exterior_derivative is None:\n vmodule = self._vmodule # shortcut\n rname = format_unop_txt('d', self._name)\n rlname = format_unop_latex(r'\\mathrm{d}', self._latex_name)\n resu = vmodule.alternating_form(self._tensor_rank+1, name=rname, \n latex_name=rlname)\n for dom, rst in self._restrictions.iteritems():\n resu._restrictions[dom] = rst.exterior_der()\n self._exterior_derivative = resu\n return self._exterior_derivative", "def orientation(p, q, r):\n val = (q.y - p.y) * (r.x - q.x) - (q.x - p.x) * (r.y - q.y)\n if val == 0:\n return 0\n elif val > 0:\n return 1\n else:\n return 2", "def get_orientation_vector(self, xyz):\n if self.g0:\n v = xyz[self.g0] - xyz[self.Ga()]\n else:\n v = self.x\n assert self.offt == 'GGG', self.offt\n return v", "def orientation(self) -> Orientation:\n # if orientation was passed in, use it\n if self._orientation is not None:\n return convert_to_enum(self._orientation, Orientation)\n\n # replace any dead pixels with median value\n temp_image = self.image.array.copy()\n temp_image[temp_image < np.median(temp_image)] = np.median(temp_image)\n\n # find \"range\" of 80 to 90th percentiles\n row_sum = np.sum(temp_image, 0)\n col_sum = np.sum(temp_image, 1)\n row80, row90 = np.percentile(row_sum, [85, 99])\n col80, col90 = np.percentile(col_sum, [85, 99])\n row_range = row90 - row80\n col_range = col90 - col80\n\n # The true picket side 
will have a greater difference in\n # percentiles than will the non-picket size.\n if row_range < col_range:\n orientation = Orientation.LEFT_RIGHT\n else:\n orientation = Orientation.UP_DOWN\n return orientation", "def orientation(self, p, q, r):\n\n val = (float(q.y - p.y) * (r.x - q.x)) - (float(q.x - p.x) * (r.y - q.y))\n if (val > 0):\n\n # Clockwise orientation\n return 1\n elif (val < 0):\n\n # Counterclockwise orientation\n return 2\n else:\n\n # Colinear orientation\n return 0", "def raw_orient(\n cal: Calibration,\n cpar: ControlPar,\n nfix: int,\n fix: List[np.ndarray],\n pix: List[Target],\n) -> bool:\n X = np.zeros((10, 6))\n y = np.zeros((10,))\n XPX = np.zeros((6, 6))\n XPy = np.zeros((6,))\n beta = np.zeros((6,))\n itnum = 0\n stopflag = False\n dm = 0.0001\n drad = 0.0001\n cal.added_par.k1 = 0\n cal.added_par.k2 = 0\n cal.added_par.k3 = 0\n cal.added_par.p1 = 0\n cal.added_par.p2 = 0\n cal.added_par.scx = 1\n cal.added_par.she = 0\n\n while not stopflag and itnum < 20:\n itnum += 1\n\n n = 0\n for i in range(nfix):\n xc, yc = pixel_to_metric(pix[i].x, pix[i].y, cpar)\n\n pos = vec_set(fix[i][0], fix[i][1], fix[i][2])\n cal.ext_par.update_rotation_matrix()\n xp, yp = img_coord(pos, cal, cpar.mm)\n\n X[n], X[n + 1] = num_deriv_exterior(cal, cpar, dm, drad, pos)\n y[n], y[n + 1] = xc - xp, yc - yp\n\n n += 2\n\n # void ata (double *a, double *ata, int m, int n, int n_large )\n ata(X, XPX, n, 6, 6)\n if np.any(XPX):\n XPXi = np.linalg.inv(XPX)\n else:\n XPXi = XPX\n\n # atl (double *u, double *a, double *l, int m, int n, int n_large)\n XPy = atl(XPy, X, y, 6)\n beta = XPXi @ XPy\n\n # ata ((double *) X, (double *) XPX, n, 6, 6);\n # matinv ((double *) XPX, 6, 6);\n # atl ((double *) XPy, (double *) X, y, n, 6, 6);\n # matmul ((double *) beta, (double *) XPX, (double *) XPy, 6,6,1,6,6);\n\n stopflag = all(abs(beta) <= 0.1)\n\n cal.ext_par.x0 += beta[0]\n cal.ext_par.y0 += beta[1]\n cal.ext_par.z0 += beta[2]\n cal.ext_par.omega += beta[3]\n cal.ext_par.phi += beta[4]\n cal.ext_par.kappa += beta[5]\n\n if stopflag:\n cal.ext_par.rotation_matrix()\n\n return stopflag", "def interior_angle(self):\n if self.interior_angle_l is not None:\n return self.interior_angle_l\n else:\n self.interior_angle_l = ((self.vert_count - 2)*180)/math.pi\n return self.interior_angle_l", "def num_deriv_exterior(\n cal: Calibration, cpar: ControlPar, dpos: float, dang: float, pos: vec3d\n):\n var = [\n cal.ext_par.x0,\n cal.ext_par.y0,\n cal.ext_par.z0,\n cal.ext_par.omega,\n cal.ext_par.phi,\n cal.ext_par.kappa,\n ]\n x_ders = np.zeros(6)\n y_ders = np.zeros(6)\n\n cal.ext_par = rotation_matrix(cal.ext_par)\n xs, ys = img_coord(pos, cal, cpar.mm)\n\n for pd in range(6):\n step = dang if pd > 2 else dpos\n var[pd] += step\n\n if pd > 2:\n cal.ext_par = rotation_matrix(cal.ext_par)\n\n xpd, ypd = img_coord(pos, cal, cpar.mm)\n x_ders[pd] = (xpd - xs) / step\n y_ders[pd] = (ypd - ys) / step\n\n var[pd] -= step\n\n cal.ext_par = rotation_matrix(cal.ext_par)\n\n return (x_ders, y_ders)", "def azizen(self):\n # x0,y0 array pixel coordinates relative to cx,cy\n# ndy0,ndx0=img.shape\n ndy0=self.ndy0\n ndx0=self.ndx0\n x0,y0=np.meshgrid(np.linspace(0,ndx0-1,ndx0)-self.cx,np.linspace(0,ndy0-1,ndy0)-self.cy)\n r0=np.sqrt(x0**2+y0**2)/self.pr0 # fractional radial distance from 0,0\n# self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]\n # why not model the zenith angle dependence with polynomial directly\n # rather than linear interpolation between roots.\n roots=np.zeros(51)\n rr=np.arange(51)/100.0\n 
for i,ref in enumerate(rr):\n roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])\n theta0 = np.interp(r0/2,rr,roots)\n \n phi0 = np.arctan2(x0,y0) - self.rot ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise, NOTE: pysolar use sub-standard definition\n phi0 = phi0%(2*np.pi)\n\n #####correction for the tilt of the camera\n k=np.array((np.sin(self.azm),np.cos(self.azm),0))\n a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]); \n a = np.transpose(a,[1,2,0])\n b=np.cos(self.beta)*a + np.sin(self.beta)*np.cross(k,a,axisb=2) \\\n + np.reshape(np.outer(np.dot(a,k),k),(self.ndy0,self.ndx0,3))*(1-np.cos(self.beta))\n theta0=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])\n phi0=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)\n# max_theta *= deg2rad \n# valid0 = (theta0<max_theta) & (theta0>0); \n# theta0[valid0]=np.nan;\n self.theta0,self.phi0=theta0,phi0" ]
[ "0.71254003", "0.68282026", "0.6420376", "0.61255026", "0.61024475", "0.5787343", "0.57387304", "0.5717728", "0.5698031", "0.567763", "0.5626359", "0.55450577", "0.5525408", "0.5507461", "0.54807997", "0.54278624", "0.5404268", "0.53900427", "0.5351375", "0.5299402", "0.5291824", "0.5286612", "0.5283092", "0.52719927", "0.5255926", "0.5250085", "0.5173937", "0.514478", "0.512267", "0.51066446" ]
0.69500554
1
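Several of the snippets listed above (the __ComputeApproximateVals variants) recover approximate exterior-orientation values by fitting a four-parameter 2D conformal transformation between image and ground coordinates and then deriving X0, Y0, Z0 and kappa from the fitted parameters. A minimal standalone sketch of that step is given below, assuming an (n, 2) array of image coordinates, an (n, 3) array of ground coordinates and a known focal length; the function and variable names are illustrative and not taken from the dataset.

import numpy as np

def approx_exterior_orientation(camera_xy, ground_xyz, focal_length):
    # Four-parameter conformal model:
    #   Ground_X = X0 + a*x + b*y
    #   Ground_Y = Y0 + a*y - b*x
    x, y = camera_xy[:, 0], camera_xy[:, 1]
    n = camera_xy.shape[0]

    A = np.zeros((2 * n, 4))
    A[0::2, 0] = 1.0
    A[0::2, 2] = x
    A[0::2, 3] = y
    A[1::2, 1] = 1.0
    A[1::2, 2] = y
    A[1::2, 3] = -x

    l = ground_xyz[:, :2].reshape(-1)              # interleaved X, Y observations
    X0, Y0, a, b = np.linalg.lstsq(A, l, rcond=None)[0]

    kappa = np.arctan2(-b, a)                      # rotation about the vertical axis
    scale = np.hypot(a, b)                         # ground units per image unit
    Z0 = ground_xyz[:, 2].mean() + scale * focal_length
    return X0, Y0, Z0, kappa

With three or more well-distributed control points this usually gives starting values close enough for the iterative collinearity adjustment to converge.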
Transforming ground points to image points
def GroundToImage(self, groundPoints):
    X0 = float(self.exteriorOrientationParameters[0])
    Y0 = float(self.exteriorOrientationParameters[1])
    Z0 = float(self.exteriorOrientationParameters[2])

    xp = float(self.camera.principalPoint[0])
    yp = float(self.camera.principalPoint[1])

    R = self.rotationMatrix
    r11 = float(R[0, 0])
    r12 = float(R[0, 1])
    r13 = float(R[0, 2])
    r21 = float(R[1, 0])
    r22 = float(R[1, 1])
    r23 = float(R[1, 2])
    r31 = float(R[2, 0])
    r32 = float(R[2, 1])
    r33 = float(R[2, 2])

    f = self.camera.focalLength

    camPoints = []

    for i in range(groundPoints.shape[0]):
        x = xp - (f) * (((r11 * (groundPoints[i, 0] - X0) + r21 * (groundPoints[i, 1] - Y0) + r31 * (
            groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (
            groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))
        y = yp - (f) * (((r12 * (groundPoints[i, 0] - X0) + r22 * (groundPoints[i, 1] - Y0) + r32 * (
            groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (
            groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))

        camPoints.append([x, y])

    # return self.CameraToImage(np.array(camPoints))
    return (np.array(camPoints))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GroundToImage_RzRyRz(self, groundPoints):\n X0 = float(self.exteriorOrientationParameters[0])\n Y0 = float(self.exteriorOrientationParameters[1])\n Z0 = float(self.exteriorOrientationParameters[2])\n\n xp = float(self.camera.principalPoint[0])\n yp = float(self.camera.principalPoint[1])\n\n R = self.rotationMatrix_RzRyRz\n r11 = float(R[0, 0])\n r12 = float(R[0, 1])\n r13 = float(R[0, 2])\n r21 = float(R[1, 0])\n r22 = float(R[1, 1])\n r23 = float(R[1, 2])\n r31 = float(R[2, 0])\n r32 = float(R[2, 1])\n r33 = float(R[2, 2])\n\n f = self.camera.focalLength\n\n camPoints = []\n\n for i in range(groundPoints.shape[0]):\n x = xp - (f) * (((r11 * (groundPoints[i, 0] - X0) + r21 * (groundPoints[i, 1] - Y0) + r31 * (\n groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (\n groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))\n y = yp - (f) * (((r12 * (groundPoints[i, 0] - X0) + r22 * (groundPoints[i, 1] - Y0) + r32 * (\n groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (\n groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))\n\n camPoints.append([x, y])\n\n # return self.CameraToImage(np.array(camPoints))\n return (np.array(camPoints))", "def inverse_warping(img_initial, img_final, pts_initial, pts_final): \n \n # YOU SHOULDN'T NEED TO CHANGE THIS\n pts_final = pts_final.astype(int)\n \n projected_img = img_initial.copy()\n for i in range(3):\n sub_img_i = img_initial[:,:,i][pts_initial[:,1], pts_initial[:,0]]\n sub_img_f = img_final[:,:,i][pts_final[:,1], pts_final[:,0]]\n \n sub_img = sub_img_i*0.5 + sub_img_f*0.5\n projected_img[:,:,i][pts_initial[:,1], pts_initial[:,0]] = sub_img\n \n return projected_img", "def four_point_transform(self, image, pts):\n rect = []\n for j in range(4):\n rect.append([pts[j * 2], pts[j * 2 + 1]])\n\n rect = np.array(rect, dtype=\"float32\")\n (tl, tr, br, bl) = rect\n # compute the width of the new image, which will be the\n # maximum distance between bottom-right and bottom-left\n # x-coordiates or the top-right and top-left x-coordinates\n widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n maxWidth = max(int(widthA), int(widthB))\n # compute the height of the new image, which will be the\n # maximum distance between the top-right and bottom-right\n # y-coordinates or the top-left and bottom-left y-coordinates\n heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n maxHeight = max(int(heightA), int(heightB))\n # now that we have the dimensions of the new image, construct\n # the set of destination points to obtain a \"birds eye view\",\n # (i.e. 
top-down view) of the image, again specifying points\n # in the top-left, top-right, bottom-right, and bottom-left\n # order\n dst = np.array([\n [0, 0],\n [maxWidth - 1, 0],\n [maxWidth - 1, maxHeight - 1],\n [0, maxHeight - 1]], dtype=\"float32\")\n # compute the perspective transform matrix and then apply it\n M = cv2.getPerspectiveTransform(rect, dst)\n warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))\n # return the warped image\n return warped", "def _point_scale2img(points, _H, _W):\n # with tf.variable_scope(\"_point_scale2img\", reuse=False):\n points = points * tf.constant([_H - 1, _W - 1], \"float32\")\n return points", "def imageFromCamera(self, points, hide_backpoints=True):\n points = np.array(points)\n # set small z distances to 0\n points[np.abs(points[..., 2]) < 1e-10] = 0\n # transform the points\n with np.errstate(divide='ignore', invalid='ignore'):\n transformed_points = np.array([-points[..., 0] * self.focallength_x_px / points[..., 2] + self.center_x_px,\n points[..., 1] * self.focallength_y_px / points[..., 2] + self.center_y_px]).T\n if hide_backpoints:\n transformed_points[points[..., 2] > 0] = np.nan\n return transformed_points", "def ImageToGround_GivenZ(self, imagePoints, Z_values):\n cameraPoints = self.ImageToCamera(imagePoints)\n cameraPoints = cameraPoints.T\n pars = self.exteriorOrientationParameters\n X0 = pars[0]\n Y0 = pars[1]\n Z0 = pars[2]\n\n T = np.array([[X0], [Y0], [Z0]])\n\n omega = pars[3]\n phi = pars[4]\n kappa = pars[5]\n R = Compute3DRotationMatrix(omega, phi, kappa)\n\n f = self.camera.focalLength\n\n # allocating memory for return array\n groundPoints = []\n\n for i in range(len(cameraPoints[1])):\n camVec = np.insert(cameraPoints[:, i], np.size(cameraPoints), -f)\n lam = (Z_values - Z0) / (np.dot(R[2, :], camVec))\n\n X = X0 + lam * np.dot(R[0, :], camVec)\n Y = Y0 + lam * np.dot(R[1, :], camVec)\n\n xy = [X, Y, Z_values]\n groundPoints.append(xy)\n\n groundPoints = np.array(groundPoints)\n\n return groundPoints", "def getCartesianPointsImage(self, points):\n return getCartesianPointsImage(points, self)", "def transform(self, previousimage):", "def fourPointTransform(image, points):\r\n\r\n topLeft, topRight, bottomLeft, bottomRight = sortFourPoints(points)\r\n\r\n # Determine the maximum width\r\n topWidth = np.sqrt(((topRight[0] - topLeft[0]) ** 2) + ((topRight[1] - topLeft[1]) ** 2))\r\n bottomWidth = np.sqrt(((bottomRight[0] - bottomLeft[0]) ** 2) + ((bottomRight[1] - bottomLeft[1]) ** 2))\r\n width = max(int(topWidth), int(bottomWidth))\r\n\r\n # Determine the maximum height\r\n leftHeight = np.sqrt(((topLeft[0] - bottomLeft[0]) ** 2) + ((topLeft[1] - bottomLeft[1]) ** 2))\r\n rightHeight = np.sqrt(((topRight[0] - bottomRight[0]) ** 2) + ((topRight[1] - bottomRight[1]) ** 2))\r\n height = max(int(leftHeight), int(rightHeight))\r\n\r\n source = np.array([topLeft, topRight, bottomRight, bottomLeft], dtype=\"float32\")\r\n\r\n destination = np.array([[0, 0],\r\n [width - 1, 0],\r\n [width - 1, height - 1],\r\n [0, height - 1]], dtype=\"float32\")\r\n\r\n transformMatrix = cv2.getPerspectiveTransform(source, destination)\r\n\r\n return cv2.warpPerspective(image, transformMatrix, (width, height))", "def _update_imgs_and_pt_list(self, points, edge_points, segs, index):\n # index specifies whether to use the x or y coordinate in x_pts\n x_pts=[]\n for i in range(0, len(points)):\n pt=points[i]\n #edge_points[pt[0],pt[1]] = 255\n x_pts.append(pt[index])\n #segs[pt[0],pt[1]]=150\n\n return x_pts, segs, edge_points", "def 
project_onto_image(self, points_in_camera_frame: ARRAY_LIKE, image: int = 0,\n temperature: Real = 0) -> np.ndarray:\n\n _, __, picture_locations = self.get_projections(points_in_camera_frame, image, temperature=temperature)\n\n return picture_locations", "def transform(self,points):\n new_points = []\n for p in points:\n new_coordinates=p.coordinates\n new_coordinates = [(new_coordinates[i] - self.min_coordinate[i]) /\n (self.max_coordinate[i]-self.min_coordinate[i]) for i in range(len(p.coordinates))]\n new_points.append(Point(p.name, new_coordinates, p.label))\n return new_points", "def project_points_img(points, proj_mat, width, height):\n pixels = proj_mat.dot(points)\n pixels = np.divide(pixels[:2, :], pixels[2, :]).transpose().astype(np.int)\n\n # Remove pixels that are outside the image\n pixels[:, 0] = np.clip(pixels[:, 0], 0, width)\n pixels[:, 1] = np.clip(pixels[:, 1], 0, height)\n # mask_x = (pixels[:, 0] < width) & (pixels[:, 0] > 0)\n # mask_y = (pixels[:, 1] < height) & (pixels[:, 1] > 0)\n\n # # Return the pixels and points that are inside the image\n # pixels = pixels[mask_x & mask_y]\n return pixels", "def project_point_cloud_to_orthographic_depth_image(pts, campos, viewdir, up, lrbt, im_hw):\n Rt = transforms.lookat_matrix(campos, campos + viewdir, up=up)\n transformed = Rt.dot(np.hstack((pts, np.ones([pts.shape[0], 1]))).T).T\n\n x = (transformed[:, 0] - lrbt[0]) / (lrbt[1] - lrbt[0]) * (im_hw[1])\n y = (transformed[:, 1] - lrbt[2]) / (lrbt[3] - lrbt[2]) * (im_hw[0])\n d = transformed[:, 2]\n\n ret = np.full(im_hw, fill_value=np.nan)\n for i in range(x.shape[0]):\n yi = im_hw[0] - int(round(y[i]))\n xi = int(round(x[i]))\n if yi < 0 or yi >= im_hw[0] or xi < 0 or xi >= im_hw[1]:\n continue\n if np.isnan(ret[yi, xi]):\n ret[yi, xi] = d[i]\n else:\n ret[yi, xi] = min(ret[yi, xi], d[i])\n\n return ret", "def create_point_cloud(self):\n pixels = []\n colors = []\n my_pixels = []\n for j in range(self.height):\n for i in range(self.width):\n depth = self.depth[j, i]\n pixels.append(\n [i * depth, j * depth, depth]\n )\n my_pixels.append(\n [i, j, 1]\n )\n # make rgb with flip()\n colors.append(np.flip(self.bgr[j, i, :]))\n # colors.append(self.bgr[j, i, :])\n self.my_pixels = my_pixels\n pixels = np.array(pixels)\n\n # project pixels to camera space\n self.xyz_points = self.intrinsics_inv @ np.transpose(pixels)\n self.color_points = colors\n\n # now add 1s to the points for homogenous coordinates\n num_points = self.get_num_xyz_points()\n ones = np.ones((1, num_points))\n self.xyzw_points = np.concatenate((self.xyz_points, ones), axis=0)\n\n self.scene = None\n self.camera_pose = None\n self.nm = None\n self.nl = None\n self.nc = None\n self.create_mesh()", "def transform(self,image,landmarks,s0=None):\n if s0 is None:\n s0 = np.array([[127.6475, 227.8161], [79.1608, 87.0376], [176.8392, 87.0376]], np.float32)\n idx = [8,36,45] #\"\"\"Anchor points\"\"\"\n pts = np.float32(landmarks[idx,:])\n M = cv2.getAffineTransform(pts,s0)\n dst = cv2.warpAffine(image, M, (256,256))\n return dst", "def process_warp(src_img, result_img: np.zeros,\n tri_affines: np.matrix, dst_points: np.array,\n delaunay) -> None:\n roi_coords = grid_coordinates(dst_points)\n # indices to vertices. 
-1 if pixel is not in any triangle\n roi_tri_indices = delaunay.find_simplex(roi_coords)\n\n for simplex in enumerate(delaunay.simplices):\n coords = roi_coords[roi_tri_indices == simplex[0]]\n num_coords = len(coords)\n out_coords = np.dot(tri_affines[simplex[0]],\n np.vstack((coords.T, np.ones(num_coords))))\n x, y = coords.T\n result_img[y, x] = bilinear_interpolate(src_img, out_coords)\n\n return None", "def imageTransform(self):\n ims = self.imageShape\n acs = self.activeShape\n dx = self.colVector\n dy = self.rowVector\n\n p0 = self.activeOrigin\n p1 = p0 + acs[2] * dx\n p2 = p0 + acs[1] * dy\n\n # print p0, p1, p2\n # print acs, dx, dy\n\n localPts = list(map(pg.Vector, [[0,0], [ims[2],0], [0,ims[1]], [0,0,1]])) # w and h of data of image in pixels.\n globalPts = list(map(pg.Vector, [p0, p1, p2, [0,0,1]]))\n m = pg.solve3DTransform(localPts, globalPts)\n m[:,2] = m[:,3]\n m[2] = m[3]\n m[2,2] = 1\n tr = Qt.QTransform(*m[:3,:3].transpose().reshape(9))\n return tr", "def transform_images(img1,img2):", "def geo_transform(self):\n pass", "def imageFromCamera(self, points, hide_backpoints=True):\n # ensure that the points are provided as an array\n points = np.array(points)\n # set small z distances to 0\n points[np.abs(points[..., 2]) < 1e-10] = 0\n # transform the points\n transformed_points = np.array(\n [-self.focallength_x_px * np.arctan2(-points[..., 0], -points[..., 2]) + self.center_x_px,\n -self.focallength_y_px * np.arctan2(points[..., 1], np.sqrt(\n points[..., 0] ** 2 + points[..., 2] ** 2)) + self.center_y_px]).T\n\n # return the points\n return transformed_points", "def project(self):\n def _project(point):\n return (\n point[0]/(point[2]/Window.COP_DISTANCE+1),\n point[1]/(point[2]/Window.COP_DISTANCE+1))\n\n self._points = [list(map(_project, face)) for face in self._points]", "def get_point_coords_wrt_image(boxes_coords, point_coords):\n with torch.no_grad():\n point_coords_wrt_image = point_coords.clone()\n point_coords_wrt_image[:, :, 0] = point_coords_wrt_image[:, :, 0] * (\n boxes_coords[:, None, 2] - boxes_coords[:, None, 0]\n )\n point_coords_wrt_image[:, :, 1] = point_coords_wrt_image[:, :, 1] * (\n boxes_coords[:, None, 3] - boxes_coords[:, None, 1]\n )\n point_coords_wrt_image[:, :, 0] += boxes_coords[:, None, 0]\n point_coords_wrt_image[:, :, 1] += boxes_coords[:, None, 1]\n return point_coords_wrt_image", "def GenerateMapAffinity(img,nb_vertex,pointsInterest,objects_centroid,scale):\n\n # Apply the downscale right now, so the vectors are correct. 
\n img_affinity = Image.new(img.mode, (int(img.size[0]/scale),int(img.size[1]/scale)), \"black\")\n # Create the empty tensors\n totensor = transforms.Compose([transforms.ToTensor()])\n\n affinities = []\n for i_points in range(nb_vertex):\n affinities.append(torch.zeros(2,int(img.size[1]/scale),int(img.size[0]/scale)))\n \n for i_pointsImage in range(len(pointsInterest)): \n pointsImage = pointsInterest[i_pointsImage]\n center = objects_centroid[i_pointsImage]\n for i_points in range(nb_vertex):\n point = pointsImage[i_points]\n affinity_pair, img_affinity = getAfinityCenter(int(img.size[0]/scale),\n int(img.size[1]/scale),\n tuple((np.array(pointsImage[i_points])/scale).tolist()),\n tuple((np.array(center)/scale).tolist()), \n img_affinity = img_affinity, radius=1)\n\n affinities[i_points] = (affinities[i_points] + affinity_pair)/2\n\n\n # Normalizing\n v = affinities[i_points].numpy() \n \n xvec = v[0]\n yvec = v[1]\n\n norms = np.sqrt(xvec * xvec + yvec * yvec)\n nonzero = norms > 0\n\n xvec[nonzero]/=norms[nonzero]\n yvec[nonzero]/=norms[nonzero]\n\n affinities[i_points] = torch.from_numpy(np.concatenate([[xvec],[yvec]]))\n affinities = torch.cat(affinities,0)\n\n return affinities", "def four_point_transform(image, pts):\n\n\tmax_x, max_y = np.max(pts[:, 0]).astype(np.int32), np.max(pts[:, 1]).astype(np.int32)\n\n\tdst = np.array([\n\t\t[0, 0],\n\t\t[image.shape[1] - 1, 0],\n\t\t[image.shape[1] - 1, image.shape[0] - 1],\n\t\t[0, image.shape[0] - 1]], dtype=\"float32\")\n\n\twarped = cv2.warpPerspective(image, cv2.getPerspectiveTransform(dst, pts), (max_x, max_y))\n\n\treturn warped", "def imageFromCamera(self, points, hide_backpoints=True):\n # ensure that the points are provided as an array\n points = np.array(points)\n # set small z distances to 0\n points[np.abs(points[..., 2]) < 1e-10] = 0\n # transform the points\n with np.errstate(divide='ignore', invalid='ignore'):\n transformed_points = np.array(\n [-self.focallength_x_px * np.arctan2(-points[..., 0], -points[..., 2]) + self.center_x_px,\n -self.focallength_y_px * points[..., 1] / np.linalg.norm(points[..., [0, 2]],\n axis=-1) + self.center_y_px]).T\n # ensure that points' x values are also nan when the y values are nan\n transformed_points[np.isnan(transformed_points[..., 1])] = np.nan\n # return the points\n return transformed_points", "def transform(self, x, y, z=None, crs=wgs84, nearest=False, maskout=False):\n\n x, y = np.ma.array(x), np.ma.array(y)\n\n # First to local proj\n _crs = check_crs(crs, raise_on_error=True)\n if isinstance(_crs, pyproj.Proj):\n x, y = transform_proj(_crs, self.proj, x, y)\n elif isinstance(_crs, Grid):\n x, y = _crs.ij_to_crs(x, y, crs=self.proj)\n\n # Then to local grid\n x = (x - self.x0) / self.dx\n y = (y - self.y0) / self.dy\n\n # See if we need to round\n if nearest:\n f = np.rint if self.pixel_ref == 'center' else np.floor\n x = f(x).astype(int)\n y = f(y).astype(int)\n\n # Mask?\n if maskout:\n if self.pixel_ref == 'center':\n mask = ~((x >= -0.5) & (x < self.nx-0.5) &\n (y >= -0.5) & (y < self.ny-0.5))\n else:\n mask = ~((x >= 0) & (x < self.nx) &\n (y >= 0) & (y < self.ny))\n x = np.ma.array(x, mask=mask)\n y = np.ma.array(y, mask=mask)\n\n return x, y", "def _convert_image_to_coordinates(self, vect) -> np.ndarray:\n xdim = vect.shape[0]\n ydim = vect.shape[1]\n\n # stride is used during averaging and length adjustment\n stride_x, stride_y = self._averaging, self._averaging\n\n # create empty vector of necessary shape\n # every \"pixel\" has 2 coordinates\n pos = np.empty((2 * xdim 
* ydim, 2), dtype=np.float32)\n\n # create coordinate spacing for x-y\n # double the num of elements by doubling x sampling\n xspace = np.linspace(0, stride_x*xdim, 2 * xdim, endpoint=False)\n yspace = np.linspace(0, stride_y*ydim, ydim, endpoint=False)\n xv, yv = np.meshgrid(xspace, yspace)\n\n # assign coordinates (pos) to all pixels\n pos[:, 0] = xv.flatten()\n pos[:, 1] = yv.flatten()\n\n # pixel midpoints are the first x-values of positions\n midpt = np.zeros((xdim * ydim, 2), dtype=np.float32)\n midpt[:, 0] = pos[0::2, 0]+(stride_x-1)/2\n midpt[:, 1] = pos[0::2, 1]+(stride_y-1)/2\n\n # rotate coordinates about midpoint to represent angle and length\n pos[0::2, 0] = midpt[:, 0] - (stride_x / 2) * (self._length/2) * \\\n vect.reshape((xdim*ydim, 2))[:, 0]\n pos[0::2, 1] = midpt[:, 1] - (stride_y / 2) * (self._length/2) * \\\n vect.reshape((xdim*ydim, 2))[:, 1]\n pos[1::2, 0] = midpt[:, 0] + (stride_x / 2) * (self._length/2) * \\\n vect.reshape((xdim*ydim, 2))[:, 0]\n pos[1::2, 1] = midpt[:, 1] + (stride_y / 2) * (self._length/2) * \\\n vect.reshape((xdim*ydim, 2))[:, 1]\n\n return pos", "def projectBack(points, proj):\n\n mpoints = MultiPoint(points)\n project = partial(\n pyproj.transform,\n proj,\n pyproj.Proj(proj='latlong', datum='WGS84'))\n gmpoints = transform(project, mpoints)\n coords = []\n for point in gmpoints.geoms:\n x, y = point.coords[0]\n coords.append((x, y))\n coords = np.array(coords)\n return coords", "def get_perspective_transform(points_src: Tensor, points_dst: Tensor) -> Tensor:\n KORNIA_CHECK_SHAPE(points_src, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK_SHAPE(points_dst, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK(points_src.shape == points_dst.shape, \"Source data shape must match Destination data shape.\")\n KORNIA_CHECK(points_src.dtype == points_dst.dtype, \"Source data type must match Destination data type.\")\n\n # we build matrix A by using only 4 point correspondence. The linear\n # system is solved with the least square method, so here\n # we could even pass more correspondence\n\n # create the lhs tensor with shape # Bx8x8\n B: int = points_src.shape[0] # batch_size\n\n A = torch.empty(B, 8, 8, device=points_src.device, dtype=points_src.dtype)\n\n # we need to perform in batch\n _zeros = zeros(B, device=points_src.device, dtype=points_src.dtype)\n _ones = torch.ones(B, device=points_src.device, dtype=points_src.dtype)\n\n for i in range(4):\n x1, y1 = points_src[..., i, 0], points_src[..., i, 1] # Bx4\n x2, y2 = points_dst[..., i, 0], points_dst[..., i, 1] # Bx4\n\n A[:, 2 * i] = stack([x1, y1, _ones, _zeros, _zeros, _zeros, -x1 * x2, -y1 * x2], -1)\n A[:, 2 * i + 1] = stack([_zeros, _zeros, _zeros, x1, y1, _ones, -x1 * y2, -y1 * y2], -1)\n\n # the rhs tensor\n b = points_dst.view(-1, 8, 1)\n\n # solve the system Ax = b\n X: Tensor = _torch_solve_cast(A, b)\n\n # create variable to return the Bx3x3 transform\n M = torch.empty(B, 9, device=points_src.device, dtype=points_src.dtype)\n M[..., :8] = X[..., 0] # Bx8\n M[..., -1].fill_(1)\n\n return M.view(-1, 3, 3) # Bx3x3" ]
[ "0.647679", "0.6467655", "0.6214912", "0.61901563", "0.61635965", "0.60568637", "0.6051804", "0.6028781", "0.60097945", "0.5999861", "0.5963049", "0.59306324", "0.5910674", "0.5894894", "0.5861491", "0.5838941", "0.58371437", "0.5833094", "0.58130443", "0.58091205", "0.58082163", "0.58014", "0.5800616", "0.57892793", "0.5785864", "0.5767218", "0.5746882", "0.57395655", "0.5713253", "0.56932056" ]
0.7286386
0
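The positive document in the record above evaluates the collinearity equations point by point with scalar arithmetic. For reference, a vectorized NumPy sketch of the same projection is shown below; the rotation matrix R, camera position (X0, Y0, Z0), focal length f and principal point (xp, yp) are passed as plain arguments here rather than read from a camera object, which is an assumption of the sketch.

import numpy as np

def ground_to_image(ground_points, R, X0, Y0, Z0, f, xp=0.0, yp=0.0):
    # ground_points: (n, 3) array of object-space coordinates
    d = ground_points - np.array([X0, Y0, Z0])   # vectors from the projection centre
    u = d @ R[:, 0]                              # r11*dX + r21*dY + r31*dZ
    v = d @ R[:, 1]                              # r12*dX + r22*dY + r32*dZ
    w = d @ R[:, 2]                              # r13*dX + r23*dY + r33*dZ
    x = xp - f * u / w
    y = yp - f * v / w
    return np.column_stack([x, y])               # (n, 2) image coordinates

Both forms implement x = xp - f*(r11*dX + r21*dY + r31*dZ)/(r13*dX + r23*dY + r33*dZ) and the analogous expression for y.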
Transforming ground points to image points
def GroundToImage_RzRyRz(self, groundPoints):
    X0 = float(self.exteriorOrientationParameters[0])
    Y0 = float(self.exteriorOrientationParameters[1])
    Z0 = float(self.exteriorOrientationParameters[2])

    xp = float(self.camera.principalPoint[0])
    yp = float(self.camera.principalPoint[1])

    R = self.rotationMatrix_RzRyRz
    r11 = float(R[0, 0])
    r12 = float(R[0, 1])
    r13 = float(R[0, 2])
    r21 = float(R[1, 0])
    r22 = float(R[1, 1])
    r23 = float(R[1, 2])
    r31 = float(R[2, 0])
    r32 = float(R[2, 1])
    r33 = float(R[2, 2])

    f = self.camera.focalLength

    camPoints = []

    for i in range(groundPoints.shape[0]):
        x = xp - (f) * (((r11 * (groundPoints[i, 0] - X0) + r21 * (groundPoints[i, 1] - Y0) + r31 * (
            groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (
            groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))
        y = yp - (f) * (((r12 * (groundPoints[i, 0] - X0) + r22 * (groundPoints[i, 1] - Y0) + r32 * (
            groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (
            groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))

        camPoints.append([x, y])

    # return self.CameraToImage(np.array(camPoints))
    return (np.array(camPoints))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GroundToImage(self, groundPoints):\n X0 = float(self.exteriorOrientationParameters[0])\n Y0 = float(self.exteriorOrientationParameters[1])\n Z0 = float(self.exteriorOrientationParameters[2])\n\n xp = float(self.camera.principalPoint[0])\n yp = float(self.camera.principalPoint[1])\n\n R = self.rotationMatrix\n r11 = float(R[0, 0])\n r12 = float(R[0, 1])\n r13 = float(R[0, 2])\n r21 = float(R[1, 0])\n r22 = float(R[1, 1])\n r23 = float(R[1, 2])\n r31 = float(R[2, 0])\n r32 = float(R[2, 1])\n r33 = float(R[2, 2])\n\n f = self.camera.focalLength\n\n camPoints = []\n\n for i in range(groundPoints.shape[0]):\n x = xp - (f) * (((r11 * (groundPoints[i, 0] - X0) + r21 * (groundPoints[i, 1] - Y0) + r31 * (\n groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (\n groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))\n y = yp - (f) * (((r12 * (groundPoints[i, 0] - X0) + r22 * (groundPoints[i, 1] - Y0) + r32 * (\n groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (\n groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))\n\n camPoints.append([x, y])\n\n # return self.CameraToImage(np.array(camPoints))\n return (np.array(camPoints))", "def inverse_warping(img_initial, img_final, pts_initial, pts_final): \n \n # YOU SHOULDN'T NEED TO CHANGE THIS\n pts_final = pts_final.astype(int)\n \n projected_img = img_initial.copy()\n for i in range(3):\n sub_img_i = img_initial[:,:,i][pts_initial[:,1], pts_initial[:,0]]\n sub_img_f = img_final[:,:,i][pts_final[:,1], pts_final[:,0]]\n \n sub_img = sub_img_i*0.5 + sub_img_f*0.5\n projected_img[:,:,i][pts_initial[:,1], pts_initial[:,0]] = sub_img\n \n return projected_img", "def four_point_transform(self, image, pts):\n rect = []\n for j in range(4):\n rect.append([pts[j * 2], pts[j * 2 + 1]])\n\n rect = np.array(rect, dtype=\"float32\")\n (tl, tr, br, bl) = rect\n # compute the width of the new image, which will be the\n # maximum distance between bottom-right and bottom-left\n # x-coordiates or the top-right and top-left x-coordinates\n widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n maxWidth = max(int(widthA), int(widthB))\n # compute the height of the new image, which will be the\n # maximum distance between the top-right and bottom-right\n # y-coordinates or the top-left and bottom-left y-coordinates\n heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n maxHeight = max(int(heightA), int(heightB))\n # now that we have the dimensions of the new image, construct\n # the set of destination points to obtain a \"birds eye view\",\n # (i.e. 
top-down view) of the image, again specifying points\n # in the top-left, top-right, bottom-right, and bottom-left\n # order\n dst = np.array([\n [0, 0],\n [maxWidth - 1, 0],\n [maxWidth - 1, maxHeight - 1],\n [0, maxHeight - 1]], dtype=\"float32\")\n # compute the perspective transform matrix and then apply it\n M = cv2.getPerspectiveTransform(rect, dst)\n warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))\n # return the warped image\n return warped", "def _point_scale2img(points, _H, _W):\n # with tf.variable_scope(\"_point_scale2img\", reuse=False):\n points = points * tf.constant([_H - 1, _W - 1], \"float32\")\n return points", "def imageFromCamera(self, points, hide_backpoints=True):\n points = np.array(points)\n # set small z distances to 0\n points[np.abs(points[..., 2]) < 1e-10] = 0\n # transform the points\n with np.errstate(divide='ignore', invalid='ignore'):\n transformed_points = np.array([-points[..., 0] * self.focallength_x_px / points[..., 2] + self.center_x_px,\n points[..., 1] * self.focallength_y_px / points[..., 2] + self.center_y_px]).T\n if hide_backpoints:\n transformed_points[points[..., 2] > 0] = np.nan\n return transformed_points", "def ImageToGround_GivenZ(self, imagePoints, Z_values):\n cameraPoints = self.ImageToCamera(imagePoints)\n cameraPoints = cameraPoints.T\n pars = self.exteriorOrientationParameters\n X0 = pars[0]\n Y0 = pars[1]\n Z0 = pars[2]\n\n T = np.array([[X0], [Y0], [Z0]])\n\n omega = pars[3]\n phi = pars[4]\n kappa = pars[5]\n R = Compute3DRotationMatrix(omega, phi, kappa)\n\n f = self.camera.focalLength\n\n # allocating memory for return array\n groundPoints = []\n\n for i in range(len(cameraPoints[1])):\n camVec = np.insert(cameraPoints[:, i], np.size(cameraPoints), -f)\n lam = (Z_values - Z0) / (np.dot(R[2, :], camVec))\n\n X = X0 + lam * np.dot(R[0, :], camVec)\n Y = Y0 + lam * np.dot(R[1, :], camVec)\n\n xy = [X, Y, Z_values]\n groundPoints.append(xy)\n\n groundPoints = np.array(groundPoints)\n\n return groundPoints", "def getCartesianPointsImage(self, points):\n return getCartesianPointsImage(points, self)", "def transform(self, previousimage):", "def fourPointTransform(image, points):\r\n\r\n topLeft, topRight, bottomLeft, bottomRight = sortFourPoints(points)\r\n\r\n # Determine the maximum width\r\n topWidth = np.sqrt(((topRight[0] - topLeft[0]) ** 2) + ((topRight[1] - topLeft[1]) ** 2))\r\n bottomWidth = np.sqrt(((bottomRight[0] - bottomLeft[0]) ** 2) + ((bottomRight[1] - bottomLeft[1]) ** 2))\r\n width = max(int(topWidth), int(bottomWidth))\r\n\r\n # Determine the maximum height\r\n leftHeight = np.sqrt(((topLeft[0] - bottomLeft[0]) ** 2) + ((topLeft[1] - bottomLeft[1]) ** 2))\r\n rightHeight = np.sqrt(((topRight[0] - bottomRight[0]) ** 2) + ((topRight[1] - bottomRight[1]) ** 2))\r\n height = max(int(leftHeight), int(rightHeight))\r\n\r\n source = np.array([topLeft, topRight, bottomRight, bottomLeft], dtype=\"float32\")\r\n\r\n destination = np.array([[0, 0],\r\n [width - 1, 0],\r\n [width - 1, height - 1],\r\n [0, height - 1]], dtype=\"float32\")\r\n\r\n transformMatrix = cv2.getPerspectiveTransform(source, destination)\r\n\r\n return cv2.warpPerspective(image, transformMatrix, (width, height))", "def _update_imgs_and_pt_list(self, points, edge_points, segs, index):\n # index specifies whether to use the x or y coordinate in x_pts\n x_pts=[]\n for i in range(0, len(points)):\n pt=points[i]\n #edge_points[pt[0],pt[1]] = 255\n x_pts.append(pt[index])\n #segs[pt[0],pt[1]]=150\n\n return x_pts, segs, edge_points", "def 
project_onto_image(self, points_in_camera_frame: ARRAY_LIKE, image: int = 0,\n temperature: Real = 0) -> np.ndarray:\n\n _, __, picture_locations = self.get_projections(points_in_camera_frame, image, temperature=temperature)\n\n return picture_locations", "def transform(self,points):\n new_points = []\n for p in points:\n new_coordinates=p.coordinates\n new_coordinates = [(new_coordinates[i] - self.min_coordinate[i]) /\n (self.max_coordinate[i]-self.min_coordinate[i]) for i in range(len(p.coordinates))]\n new_points.append(Point(p.name, new_coordinates, p.label))\n return new_points", "def project_points_img(points, proj_mat, width, height):\n pixels = proj_mat.dot(points)\n pixels = np.divide(pixels[:2, :], pixels[2, :]).transpose().astype(np.int)\n\n # Remove pixels that are outside the image\n pixels[:, 0] = np.clip(pixels[:, 0], 0, width)\n pixels[:, 1] = np.clip(pixels[:, 1], 0, height)\n # mask_x = (pixels[:, 0] < width) & (pixels[:, 0] > 0)\n # mask_y = (pixels[:, 1] < height) & (pixels[:, 1] > 0)\n\n # # Return the pixels and points that are inside the image\n # pixels = pixels[mask_x & mask_y]\n return pixels", "def project_point_cloud_to_orthographic_depth_image(pts, campos, viewdir, up, lrbt, im_hw):\n Rt = transforms.lookat_matrix(campos, campos + viewdir, up=up)\n transformed = Rt.dot(np.hstack((pts, np.ones([pts.shape[0], 1]))).T).T\n\n x = (transformed[:, 0] - lrbt[0]) / (lrbt[1] - lrbt[0]) * (im_hw[1])\n y = (transformed[:, 1] - lrbt[2]) / (lrbt[3] - lrbt[2]) * (im_hw[0])\n d = transformed[:, 2]\n\n ret = np.full(im_hw, fill_value=np.nan)\n for i in range(x.shape[0]):\n yi = im_hw[0] - int(round(y[i]))\n xi = int(round(x[i]))\n if yi < 0 or yi >= im_hw[0] or xi < 0 or xi >= im_hw[1]:\n continue\n if np.isnan(ret[yi, xi]):\n ret[yi, xi] = d[i]\n else:\n ret[yi, xi] = min(ret[yi, xi], d[i])\n\n return ret", "def create_point_cloud(self):\n pixels = []\n colors = []\n my_pixels = []\n for j in range(self.height):\n for i in range(self.width):\n depth = self.depth[j, i]\n pixels.append(\n [i * depth, j * depth, depth]\n )\n my_pixels.append(\n [i, j, 1]\n )\n # make rgb with flip()\n colors.append(np.flip(self.bgr[j, i, :]))\n # colors.append(self.bgr[j, i, :])\n self.my_pixels = my_pixels\n pixels = np.array(pixels)\n\n # project pixels to camera space\n self.xyz_points = self.intrinsics_inv @ np.transpose(pixels)\n self.color_points = colors\n\n # now add 1s to the points for homogenous coordinates\n num_points = self.get_num_xyz_points()\n ones = np.ones((1, num_points))\n self.xyzw_points = np.concatenate((self.xyz_points, ones), axis=0)\n\n self.scene = None\n self.camera_pose = None\n self.nm = None\n self.nl = None\n self.nc = None\n self.create_mesh()", "def transform(self,image,landmarks,s0=None):\n if s0 is None:\n s0 = np.array([[127.6475, 227.8161], [79.1608, 87.0376], [176.8392, 87.0376]], np.float32)\n idx = [8,36,45] #\"\"\"Anchor points\"\"\"\n pts = np.float32(landmarks[idx,:])\n M = cv2.getAffineTransform(pts,s0)\n dst = cv2.warpAffine(image, M, (256,256))\n return dst", "def process_warp(src_img, result_img: np.zeros,\n tri_affines: np.matrix, dst_points: np.array,\n delaunay) -> None:\n roi_coords = grid_coordinates(dst_points)\n # indices to vertices. 
-1 if pixel is not in any triangle\n roi_tri_indices = delaunay.find_simplex(roi_coords)\n\n for simplex in enumerate(delaunay.simplices):\n coords = roi_coords[roi_tri_indices == simplex[0]]\n num_coords = len(coords)\n out_coords = np.dot(tri_affines[simplex[0]],\n np.vstack((coords.T, np.ones(num_coords))))\n x, y = coords.T\n result_img[y, x] = bilinear_interpolate(src_img, out_coords)\n\n return None", "def imageTransform(self):\n ims = self.imageShape\n acs = self.activeShape\n dx = self.colVector\n dy = self.rowVector\n\n p0 = self.activeOrigin\n p1 = p0 + acs[2] * dx\n p2 = p0 + acs[1] * dy\n\n # print p0, p1, p2\n # print acs, dx, dy\n\n localPts = list(map(pg.Vector, [[0,0], [ims[2],0], [0,ims[1]], [0,0,1]])) # w and h of data of image in pixels.\n globalPts = list(map(pg.Vector, [p0, p1, p2, [0,0,1]]))\n m = pg.solve3DTransform(localPts, globalPts)\n m[:,2] = m[:,3]\n m[2] = m[3]\n m[2,2] = 1\n tr = Qt.QTransform(*m[:3,:3].transpose().reshape(9))\n return tr", "def transform_images(img1,img2):", "def geo_transform(self):\n pass", "def imageFromCamera(self, points, hide_backpoints=True):\n # ensure that the points are provided as an array\n points = np.array(points)\n # set small z distances to 0\n points[np.abs(points[..., 2]) < 1e-10] = 0\n # transform the points\n transformed_points = np.array(\n [-self.focallength_x_px * np.arctan2(-points[..., 0], -points[..., 2]) + self.center_x_px,\n -self.focallength_y_px * np.arctan2(points[..., 1], np.sqrt(\n points[..., 0] ** 2 + points[..., 2] ** 2)) + self.center_y_px]).T\n\n # return the points\n return transformed_points", "def project(self):\n def _project(point):\n return (\n point[0]/(point[2]/Window.COP_DISTANCE+1),\n point[1]/(point[2]/Window.COP_DISTANCE+1))\n\n self._points = [list(map(_project, face)) for face in self._points]", "def get_point_coords_wrt_image(boxes_coords, point_coords):\n with torch.no_grad():\n point_coords_wrt_image = point_coords.clone()\n point_coords_wrt_image[:, :, 0] = point_coords_wrt_image[:, :, 0] * (\n boxes_coords[:, None, 2] - boxes_coords[:, None, 0]\n )\n point_coords_wrt_image[:, :, 1] = point_coords_wrt_image[:, :, 1] * (\n boxes_coords[:, None, 3] - boxes_coords[:, None, 1]\n )\n point_coords_wrt_image[:, :, 0] += boxes_coords[:, None, 0]\n point_coords_wrt_image[:, :, 1] += boxes_coords[:, None, 1]\n return point_coords_wrt_image", "def GenerateMapAffinity(img,nb_vertex,pointsInterest,objects_centroid,scale):\n\n # Apply the downscale right now, so the vectors are correct. 
\n img_affinity = Image.new(img.mode, (int(img.size[0]/scale),int(img.size[1]/scale)), \"black\")\n # Create the empty tensors\n totensor = transforms.Compose([transforms.ToTensor()])\n\n affinities = []\n for i_points in range(nb_vertex):\n affinities.append(torch.zeros(2,int(img.size[1]/scale),int(img.size[0]/scale)))\n \n for i_pointsImage in range(len(pointsInterest)): \n pointsImage = pointsInterest[i_pointsImage]\n center = objects_centroid[i_pointsImage]\n for i_points in range(nb_vertex):\n point = pointsImage[i_points]\n affinity_pair, img_affinity = getAfinityCenter(int(img.size[0]/scale),\n int(img.size[1]/scale),\n tuple((np.array(pointsImage[i_points])/scale).tolist()),\n tuple((np.array(center)/scale).tolist()), \n img_affinity = img_affinity, radius=1)\n\n affinities[i_points] = (affinities[i_points] + affinity_pair)/2\n\n\n # Normalizing\n v = affinities[i_points].numpy() \n \n xvec = v[0]\n yvec = v[1]\n\n norms = np.sqrt(xvec * xvec + yvec * yvec)\n nonzero = norms > 0\n\n xvec[nonzero]/=norms[nonzero]\n yvec[nonzero]/=norms[nonzero]\n\n affinities[i_points] = torch.from_numpy(np.concatenate([[xvec],[yvec]]))\n affinities = torch.cat(affinities,0)\n\n return affinities", "def four_point_transform(image, pts):\n\n\tmax_x, max_y = np.max(pts[:, 0]).astype(np.int32), np.max(pts[:, 1]).astype(np.int32)\n\n\tdst = np.array([\n\t\t[0, 0],\n\t\t[image.shape[1] - 1, 0],\n\t\t[image.shape[1] - 1, image.shape[0] - 1],\n\t\t[0, image.shape[0] - 1]], dtype=\"float32\")\n\n\twarped = cv2.warpPerspective(image, cv2.getPerspectiveTransform(dst, pts), (max_x, max_y))\n\n\treturn warped", "def imageFromCamera(self, points, hide_backpoints=True):\n # ensure that the points are provided as an array\n points = np.array(points)\n # set small z distances to 0\n points[np.abs(points[..., 2]) < 1e-10] = 0\n # transform the points\n with np.errstate(divide='ignore', invalid='ignore'):\n transformed_points = np.array(\n [-self.focallength_x_px * np.arctan2(-points[..., 0], -points[..., 2]) + self.center_x_px,\n -self.focallength_y_px * points[..., 1] / np.linalg.norm(points[..., [0, 2]],\n axis=-1) + self.center_y_px]).T\n # ensure that points' x values are also nan when the y values are nan\n transformed_points[np.isnan(transformed_points[..., 1])] = np.nan\n # return the points\n return transformed_points", "def transform(self, x, y, z=None, crs=wgs84, nearest=False, maskout=False):\n\n x, y = np.ma.array(x), np.ma.array(y)\n\n # First to local proj\n _crs = check_crs(crs, raise_on_error=True)\n if isinstance(_crs, pyproj.Proj):\n x, y = transform_proj(_crs, self.proj, x, y)\n elif isinstance(_crs, Grid):\n x, y = _crs.ij_to_crs(x, y, crs=self.proj)\n\n # Then to local grid\n x = (x - self.x0) / self.dx\n y = (y - self.y0) / self.dy\n\n # See if we need to round\n if nearest:\n f = np.rint if self.pixel_ref == 'center' else np.floor\n x = f(x).astype(int)\n y = f(y).astype(int)\n\n # Mask?\n if maskout:\n if self.pixel_ref == 'center':\n mask = ~((x >= -0.5) & (x < self.nx-0.5) &\n (y >= -0.5) & (y < self.ny-0.5))\n else:\n mask = ~((x >= 0) & (x < self.nx) &\n (y >= 0) & (y < self.ny))\n x = np.ma.array(x, mask=mask)\n y = np.ma.array(y, mask=mask)\n\n return x, y", "def _convert_image_to_coordinates(self, vect) -> np.ndarray:\n xdim = vect.shape[0]\n ydim = vect.shape[1]\n\n # stride is used during averaging and length adjustment\n stride_x, stride_y = self._averaging, self._averaging\n\n # create empty vector of necessary shape\n # every \"pixel\" has 2 coordinates\n pos = np.empty((2 * xdim 
* ydim, 2), dtype=np.float32)\n\n # create coordinate spacing for x-y\n # double the num of elements by doubling x sampling\n xspace = np.linspace(0, stride_x*xdim, 2 * xdim, endpoint=False)\n yspace = np.linspace(0, stride_y*ydim, ydim, endpoint=False)\n xv, yv = np.meshgrid(xspace, yspace)\n\n # assign coordinates (pos) to all pixels\n pos[:, 0] = xv.flatten()\n pos[:, 1] = yv.flatten()\n\n # pixel midpoints are the first x-values of positions\n midpt = np.zeros((xdim * ydim, 2), dtype=np.float32)\n midpt[:, 0] = pos[0::2, 0]+(stride_x-1)/2\n midpt[:, 1] = pos[0::2, 1]+(stride_y-1)/2\n\n # rotate coordinates about midpoint to represent angle and length\n pos[0::2, 0] = midpt[:, 0] - (stride_x / 2) * (self._length/2) * \\\n vect.reshape((xdim*ydim, 2))[:, 0]\n pos[0::2, 1] = midpt[:, 1] - (stride_y / 2) * (self._length/2) * \\\n vect.reshape((xdim*ydim, 2))[:, 1]\n pos[1::2, 0] = midpt[:, 0] + (stride_x / 2) * (self._length/2) * \\\n vect.reshape((xdim*ydim, 2))[:, 0]\n pos[1::2, 1] = midpt[:, 1] + (stride_y / 2) * (self._length/2) * \\\n vect.reshape((xdim*ydim, 2))[:, 1]\n\n return pos", "def projectBack(points, proj):\n\n mpoints = MultiPoint(points)\n project = partial(\n pyproj.transform,\n proj,\n pyproj.Proj(proj='latlong', datum='WGS84'))\n gmpoints = transform(project, mpoints)\n coords = []\n for point in gmpoints.geoms:\n x, y = point.coords[0]\n coords.append((x, y))\n coords = np.array(coords)\n return coords", "def get_perspective_transform(points_src: Tensor, points_dst: Tensor) -> Tensor:\n KORNIA_CHECK_SHAPE(points_src, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK_SHAPE(points_dst, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK(points_src.shape == points_dst.shape, \"Source data shape must match Destination data shape.\")\n KORNIA_CHECK(points_src.dtype == points_dst.dtype, \"Source data type must match Destination data type.\")\n\n # we build matrix A by using only 4 point correspondence. The linear\n # system is solved with the least square method, so here\n # we could even pass more correspondence\n\n # create the lhs tensor with shape # Bx8x8\n B: int = points_src.shape[0] # batch_size\n\n A = torch.empty(B, 8, 8, device=points_src.device, dtype=points_src.dtype)\n\n # we need to perform in batch\n _zeros = zeros(B, device=points_src.device, dtype=points_src.dtype)\n _ones = torch.ones(B, device=points_src.device, dtype=points_src.dtype)\n\n for i in range(4):\n x1, y1 = points_src[..., i, 0], points_src[..., i, 1] # Bx4\n x2, y2 = points_dst[..., i, 0], points_dst[..., i, 1] # Bx4\n\n A[:, 2 * i] = stack([x1, y1, _ones, _zeros, _zeros, _zeros, -x1 * x2, -y1 * x2], -1)\n A[:, 2 * i + 1] = stack([_zeros, _zeros, _zeros, x1, y1, _ones, -x1 * y2, -y1 * y2], -1)\n\n # the rhs tensor\n b = points_dst.view(-1, 8, 1)\n\n # solve the system Ax = b\n X: Tensor = _torch_solve_cast(A, b)\n\n # create variable to return the Bx3x3 transform\n M = torch.empty(B, 9, device=points_src.device, dtype=points_src.dtype)\n M[..., :8] = X[..., 0] # Bx8\n M[..., -1].fill_(1)\n\n return M.view(-1, 3, 3) # Bx3x3" ]
[ "0.7286386", "0.6467655", "0.6214912", "0.61901563", "0.61635965", "0.60568637", "0.6051804", "0.6028781", "0.60097945", "0.5999861", "0.5963049", "0.59306324", "0.5910674", "0.5894894", "0.5861491", "0.5838941", "0.58371437", "0.5833094", "0.58130443", "0.58091205", "0.58082163", "0.58014", "0.5800616", "0.57892793", "0.5785864", "0.5767218", "0.5746882", "0.57395655", "0.5713253", "0.56932056" ]
0.647679
1
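The record above differs from the previous one only in the rotation parameterization: the exterior orientation uses a Z-Y-Z Euler sequence (azimuth, phi, kappa) instead of omega-phi-kappa. A small sketch of composing such a rotation matrix is given below; the multiplication order Rz(azimuth) @ Ry(phi) @ Rz(kappa) is an assumed convention, not something stated in the dataset.

import numpy as np

def rot_z(angle):
    c, s = np.cos(angle), np.sin(angle)
    return np.array([[c, -s, 0.0],
                     [s,  c, 0.0],
                     [0.0, 0.0, 1.0]])

def rot_y(angle):
    c, s = np.cos(angle), np.sin(angle)
    return np.array([[c, 0.0, s],
                     [0.0, 1.0, 0.0],
                     [-s, 0.0, c]])

def rotation_matrix_rz_ry_rz(azimuth, phi, kappa):
    # Z-Y-Z Euler composition of elementary rotations
    return rot_z(azimuth) @ rot_y(phi) @ rot_z(kappa)

Once R is built this way, the vectorized projection sketched after the previous record applies unchanged.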
Transforms Image point to a Ray in world system
def ImageToRay(self, imagePoints):
    pass  # delete after implementations
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pointToWorld( nImageX, nImageY, rDepth, rMaxX = 320, rMaxY = 240, rFieldOfViewX = 60, rFieldOfViewY = 40 ):\n # convert to [-0.5,0.5]\n rCenteredX = ( nImageX / rMaxX ) - 0.5;\n rCenteredY = ( nImageY / rMaxY ) - 0.5;", "def project_to_image_plane(self, point_in_world, timestamp):\n\n camera_info = CameraInfo()\n\n fx = self.config['camera_info']['focal_length_x']\n fy = self.config['camera_info']['focal_length_y']\n\n camera_info.width = self.config['camera_info']['image_width']\n camera_info.height = self.config['camera_info']['image_height']\n\n #print(\"fx {}, fy {}\".format(fx, fy))\n\n camera_info.K = np.array([[fx, 0, camera_info.width / 2],\n [0, fy, camera_info.height / 2],\n [0, 0, 1.]], dtype=np.float32)\n camera_info.P = np.array([[fx, 0, camera_info.width / 2, 0],\n [0, fy, camera_info.height / 2, 0],\n [0, 0, 1., 0]])\n camera_info.R = np.array([[1., 0, 0],\n [0, 1., 0],\n [0, 0, 1.]], dtype=np.float32)\n\n camera = PinholeCameraModel()\n camera.fromCameraInfo(camera_info)\n\n #print(\"point_in_world = {}\".format(str(point_in_world)))\n #print(\"camera projection matrix \", camera.P)\n\n # get transform between pose of camera and world frame\n trans = None\n point_in_camera_space = None\n point_in_image = None\n bbox_points_camera_image = []\n\n euler_transforms = (\n math.radians(90), # roll along X to force Y axis 'up'\n math.radians(-90 + -.75), # pitch along Y to force X axis towards 'right', with slight adjustment for camera's 'yaw'\n math.radians(-9) # another roll to orient the camera slightly 'upwards', (camera's 'pitch')\n )\n euler_axes = 'sxyx'\n\n try:\n self.listener.waitForTransform(\"/base_link\",\n \"/world\", timestamp, rospy.Duration(0.1))\n (trans, rot) = self.listener.lookupTransform(\"/base_link\",\n \"/world\", timestamp)\n\n camera_orientation_adj = tf.transformations.quaternion_from_euler(*euler_transforms, axes=euler_axes)\n\n trans_matrix = self.listener.fromTranslationRotation(trans, rot)\n camera_orientation_adj = self.listener.fromTranslationRotation((0, 0, 0), camera_orientation_adj)\n\n #print(\"trans {}, rot {}\".format(trans, rot))\n #print(\"transform matrix {}\".format(trans_matrix))\n\n point = np.array([point_in_world.x, point_in_world.y, point_in_world.z, 1.0])\n\n # this point should match what you'd see from being inside the vehicle looking straight ahead.\n point_in_camera_space = trans_matrix.dot(point)\n\n #print(\"point in camera frame {}\".format(point_in_camera_space))\n\n final_trans_matrix = camera_orientation_adj.dot(trans_matrix)\n\n # this point is from the view point of the camera (oriented along the camera's rotation quaternion)\n point_in_camera_space = final_trans_matrix.dot(point)\n\n #print(\"point in camera frame adj {}\".format(point_in_camera_space))\n\n bbox_points = [(point_in_camera_space[0] - 0.5, point_in_camera_space[1] - 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] + 0.5, point_in_camera_space[1] + 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] - 0.5, point_in_camera_space[1] - 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] + 0.5, point_in_camera_space[1] + 1.1, point_in_camera_space[2], 1.0)]\n\n # these points represent the bounding box within the camera's image\n for p in bbox_points:\n bbox_points_camera_image.append(camera.project3dToPixel(p))\n\n # print(\"point in image {}\".format(bbox_points_camera_image))\n\n except (tf.Exception, tf.LookupException, tf.ConnectivityException):\n rospy.logerr(\"Failed to find camera to map 
transform\")\n\n return bbox_points_camera_image", "def localize_pixel(img_pos,camera : Camera,lidar : Lidar, scan : LaserScan) -> tuple:\n\n # ---OBJ--\n # x r1 /\\ r2 x\n # / \\\n #cam_ray / \\ average_ray\n # / \\\n # / \\\n # CAM ----> LID\n # \n\n # has to be 2d\n assert (img_pos.size == 2)\n\n cam_ray = camera.get_ray_through_image(img_pos)\n\n cam_ray_robot = camera.get_ray_in_robot_frame(cam_ray)\n\n cam_ray_lidar = lidar.get_ray_in_lidar_frame(cam_ray_robot)\n\n # flatten camera ray\n cam_ray_lidar_flat = lidar.get_ray_projection(cam_ray_lidar)\n\n # figure out which lidar rays correspond to the camera ray\n (ray1,ray2) = lidar.get_corresponding_lidar_rays(cam_ray_lidar_flat,scan)\n\n # if no rays found corresponding to scan data\n if ray1 is None or ray2 is None:\n return (None,None)\n\n # get the normal to the lidar hit\n intersection_normal = lidar.get_normal_to_plane(ray1,ray2)\n\n # get the distance data in horizontal plane, from lidar to object\n lidar_to_target_length = lidar.get_camera_ray_length(cam_ray_lidar_flat,ray1,ray2)\n\n # get the vector from camera to lidar (flattened to lidar plane)\n # i.e. origin of lidar frame in camera frame\n lidar_to_cam_vec = cam_ray_lidar_flat.origin\n cam_to_lidar_flat = Ray(lidar_to_cam_vec,-lidar_to_cam_vec,np.linalg.norm(lidar_to_cam_vec))\n \n # now workout the lidar to object ray, i.e. interpolate between ray1's and ray2's tips\n lidar_to_object_flat = interpolated_ray(ray1,ray2,0.5,lidar_to_target_length)\n\n # now finally workout the vector from camera to object (flattened)\n # this lets us access the true z-distance in the camera\n cam_to_object_flat = lidar_to_object_flat.get_vec() + cam_to_lidar_flat.get_vec()\n \n cam_to_object_flat_length = np.linalg.norm(cam_to_object_flat)\n\n # angle from horizontal on camera ray\n cam_ray_theta = angle_between(cam_ray_lidar.get_vec(),cam_to_object_flat)\n\n # length of original camera ray (knowing the length of its projection)\n # will fail if ray is pointing straight up or down\n cam_ray_robot.length = cam_to_object_flat_length / math.cos(cam_ray_theta)\n\n\n object_robot = cam_ray_robot.get_vec()+cam_ray_robot.origin\n\n return (object_robot,intersection_normal)", "def world_to_camera(self, X):\n raise NotImplementedError", "def generate_ray(self, img_point):\n # TODO A5 copy your implementation from A4\n i = img_point[0]\n j = img_point[1]\n dist_vector = self.target - self.eye\n proj_dist = np.linalg.norm(dist_vector)\n height = 2 * proj_dist * np.tan(self.vfov / 2.0)\n width = self.aspect * height\n left = (-1) * width / 2.0\n bottom = (-1) * height / 2.0\n u = i * width + left\n v = j * height + bottom\n ray_origin = self.eye\n ray_direction = ((-1) * proj_dist * self.w) + u * self.u + v * self.v\n return Ray(ray_origin, ray_direction)", "def rays(self):\n pixels = np.array([\n [u, v, 1.]\n for u, v in product(range(self.width), range(self.height))\n ], dtype=np.int32).T\n rays = project(self.camera.P_pinv, pixels)\n\n return self._camera.center, rays.T", "def cam_to_world(cam_point, world_to_cam):\n # cam_point = np.array([cam_pose[0], cam_pose[1], cam_pose[2]])\n\n obj_vector = np.concatenate((cam_point, np.ones(1))).reshape((4, 1))\n world_point = np.dot(world_to_cam, obj_vector)\n\n world_point = [p[0] for p in world_point]\n return world_point[0:3]", "def ray(self, pixel):\n # Ensure pixel is in homogenous coordinates\n if len(pixel) == 2:\n pixel = np.vstack((pixel, [1]))\n\n ray = project(self._camera.P_pinv, pixel.astype(np.float32))\n assert ray.shape == (4, 1)\n\n return 
self._camera.center, ray", "def camera_to_world(self, X):\n raise NotImplementedError", "def camera_transform(image):\n img = np.zeros((image.shape[0], image.shape[1], 3))\n for y in range(image.shape[0]):\n for x in range(image.shape[1]):\n img[y][x] = (x - 320) / 575.5 * image[y, x], (240 - y) / 575.5 * image[y, x], image[\n y, x]\n return img", "def frusrum_ray(self, param_x, param_y):\n l, r, b, t, n, f = self.body.dim\n # convert normalized into near frustum space\n sm = ScaleMat(x=r - l, y=t - b)\n # .5 to compensate origin difference between OpenGL space and pane space\n offset = MoveMat(-.5, -.5, -n)\n frustum_point = sm * offset * Pnt(x=param_x, y=param_y, z=0)\n ray = gt.Ray([0, 0, 0], frustum_point.xyz)\n return self.tripod.plane.TM * ray", "def world_pos_from_img_pos(self, img_pos, img_shape, arm_pos, scale):\n centre_x = img_shape[0]/2\n centre_y = img_shape[1]/2\n #scale = 0.2*2/centre_x #m/pixel\n #print(\"centre x, y\")\n #print(centre_x)\n #print(centre_y)\n \n wld_x = arm_pos[0]\n wld_y = arm_pos[1]\n \n img_x = img_pos[0]\n img_y = img_pos[1]\n #print(\"img x, y\")\n #print(img_x)\n #print(img_y)\n \n img_dx = img_x - centre_x\n img_dy = img_y - centre_y\n #print(\"img dx, dy\")\n #print(img_dx)\n #print(img_dy)\n \n # +wld_x = -img_y ; +wld_y = -img_x\n wld_dx = -img_dy*scale\n wld_dy = -img_dx*scale\n\n #limit output\n #wld_dx = max(wld_dx, -centre_y*scale)\n #wld_dx = min(wld_dx, centre_y*scale)\n #wld_dy = max(wld_dy, -centre_x*scale)\n #wld_dy = min(wld_dy, centre_x*scale)\n \n new_wld_x = wld_x + wld_dx\n new_wld_y = wld_y + wld_dy\n \n return [new_wld_x, new_wld_y]", "def pinhole_projection_world_to_image(world_pos, K, camera_to_world=None):\n\n world_pos_vec = np.append(world_pos, 1)\n\n # transform to camera frame if camera_to_world is not None\n if camera_to_world is not None:\n world_pos_vec = np.dot(np.linalg.inv(camera_to_world), world_pos_vec)\n\n # scaled position is [X/Z, Y/Z, 1] where X,Y,Z is the position in camera frame\n scaled_pos = np.array([world_pos_vec[0]/world_pos_vec[2], world_pos_vec[1]/world_pos_vec[2], 1])\n uv = np.dot(K, scaled_pos)[:2]\n return uv", "def cameraToWorld(self, p):\n result = self.camPos\n result += p[2] * self.camZ # result is now in the middle of the view-plane\n result += p[0] * self.camX # result is now in the middle-left of the view-plane\n result += p[1] * self.camY # result is now the world-space equivalent of p\n return result", "def test_compute_pixel_rays() -> None:\n u = 12\n v = 2\n img_w = 20\n img_h = 10\n fx = 10\n fy = 10\n\n ray_dir = _compute_pixel_ray_direction(u, v, fx, fy, img_w, img_h)\n\n gt_ray_dir: NDArrayFloat = np.array([2.0, -3.0, 10.0])\n gt_ray_dir /= np.linalg.norm(gt_ray_dir)\n\n assert np.allclose(gt_ray_dir, ray_dir)", "def project(points, camera_params, theta):\n \"\"\"\n Function takes input of 3d_points, transformations and Convert 3-D points to 2-D by projecting onto images. 
\n Input:\n points: 3D points in world frame\n camera_params: parameters of camera corrosponding to the point\n theta: Needed For PAL camera to specify the sub camera index for the points\n Output:\n points_proj: 2D reprojected points for 3D points \n\n \"\"\"\n # Convert the 3D points to Camera Frame by rotaion followes by translation\n points_proj1 = rotate(points[:,0:3], camera_params[:, :3])\n points_proj1 += camera_params[:, 3:6]\n # FOR PAL: Converting into the Sub-camera Frame by respective rotation\n thetas = theta * np.pi / 3 \n points_proj = np.copy(points_proj1)\n points_proj[:,0] = points_proj1[:,0]*np.cos(thetas) - points_proj1[:,2]*np.sin(thetas)\n points_proj[:,2] = points_proj1[:,0]*np.sin(thetas) + points_proj1[:,2]*np.cos(thetas)\n # Avoiding Zero error\n for i in range(len(points_proj)):\n if(points_proj[i,2]==0):\n points_proj[i,0] = 0\n points_proj[i,1] = 0\n points_proj[i,2] = 1\n # 2D projection\n points_proj = points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = camera_params[:, 6]\n k1 = camera_params[:, 7]\n k2 = camera_params[:, 8]\n n = np.sum(points_proj**2, axis=1)\n r = 1 + k1 * n + k2 * n**2\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def tanp_to_world(self, x, y):\n crpix1, crpix2 = self._wcs.wcs.crpix\n x = x + crpix1\n y = y + crpix2\n ra, dec = self._wcslin.all_pix2world(x, y, 1)\n return ra, dec", "def project_point_along_2Dvector(): \n \n # 2d vector \n a = vec2( 1 , 1 )\n b = vec2( -1 , -1 )\n com = vec2() \n\n #fb = pixel_op() \n #fb.create_buffer(800, 800)\n #fb.graticule(pixels_per_unit)\n\n vecs = [a,b]\n pts = [com.project_pt(a, b, 2)]\n\n bloody_simple_2drender('2d_render.png', vecs=vecs, pts=pts, gridsize=40)", "def toworld(self, *args, **kwargs):\n return _image.image_toworld(self, *args, **kwargs)", "def obj_ray_cast(obj, matrix):\r\n \r\n # get the ray relative to the object\r\n matrix_inv = matrix.inverted()\r\n ray_origin_obj = matrix_inv * ray_origin\r\n ray_target_obj = matrix_inv * ray_target\r\n ray_direction_obj = ray_target_obj - ray_origin_obj\r\n \r\n # cast the ray\r\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\r\n \r\n if success:\r\n return location, normal, face_index\r\n else:\r\n return None, None, None", "def transform(self,image,landmarks,s0=None):\n if s0 is None:\n s0 = np.array([[127.6475, 227.8161], [79.1608, 87.0376], [176.8392, 87.0376]], np.float32)\n idx = [8,36,45] #\"\"\"Anchor points\"\"\"\n pts = np.float32(landmarks[idx,:])\n M = cv2.getAffineTransform(pts,s0)\n dst = cv2.warpAffine(image, M, (256,256))\n return dst", "def screenToCamera(self,x,y):\n #self.x = x\n #self.y = y\n new_x = x / (self.surf.get_width() - 1) - 0.5\n #-(new_x)\n new_y = y / (self.surf.get_height() - 1)\n new_y = (1.0 - cy) - 0.5\n new_z = -self.camNear\n formula = math3dsol.VectorN((new_x,new_y,new_z))\n return formula\n\n # FINISH ME!!!", "def obj_ray_cast(obj, matrix):\n\n # get the ray relative to the object\n matrix_inv = matrix.inverted()\n ray_origin_obj = matrix_inv * ray_origin\n ray_target_obj = matrix_inv * ray_target\n ray_direction_obj = ray_target_obj - ray_origin_obj\n\n # cast the ray\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\n\n if success:\n return location, normal, face_index\n else:\n return None, None, None", "def obj_ray_cast(obj, matrix):\n\n # get the ray relative to the object\n matrix_inv = matrix.inverted()\n ray_origin_obj = matrix_inv @ ray_origin\n ray_target_obj = matrix_inv @ ray_target\n 
ray_direction_obj = ray_target_obj - ray_origin_obj\n\n # cast the ray\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\n\n if success:\n return location, normal, face_index\n else:\n return None, None, None", "def _compute_pixel_ray_direction(\n u: float, v: float, fx: float, fy: float, img_w: int, img_h: int\n) -> NDArrayFloat:\n if not np.isclose(fx, fy, atol=1e-3):\n raise ValueError(\n f\"Focal lengths in the x and y directions must match: {fx} != {fy}\"\n )\n\n # approximation for principal point\n px = img_w / 2\n py = img_h / 2\n\n # the camera coordinate frame (where Z is out, x is right, y is down).\n\n # compute offset from the center\n x_center_offs = u - px\n y_center_offs = v - py\n\n ray_dir: NDArrayFloat = np.array([x_center_offs, y_center_offs, fx])\n ray_dir /= np.linalg.norm(ray_dir)\n return ray_dir", "def pinhole_projection_image_to_world(uv, z, K):\n\n u_v_1 = np.array([uv[0], uv[1], 1])\n pos = z * np.matmul(inv(K),u_v_1)\n return pos", "def project_point_cloud_to_orthographic_depth_image(pts, campos, viewdir, up, lrbt, im_hw):\n Rt = transforms.lookat_matrix(campos, campos + viewdir, up=up)\n transformed = Rt.dot(np.hstack((pts, np.ones([pts.shape[0], 1]))).T).T\n\n x = (transformed[:, 0] - lrbt[0]) / (lrbt[1] - lrbt[0]) * (im_hw[1])\n y = (transformed[:, 1] - lrbt[2]) / (lrbt[3] - lrbt[2]) * (im_hw[0])\n d = transformed[:, 2]\n\n ret = np.full(im_hw, fill_value=np.nan)\n for i in range(x.shape[0]):\n yi = im_hw[0] - int(round(y[i]))\n xi = int(round(x[i]))\n if yi < 0 or yi >= im_hw[0] or xi < 0 or xi >= im_hw[1]:\n continue\n if np.isnan(ret[yi, xi]):\n ret[yi, xi] = d[i]\n else:\n ret[yi, xi] = min(ret[yi, xi], d[i])\n\n return ret", "def project(self, point):\n return np.round(project(self.camera.P, point)).astype(int)", "def relative_pose_cam_to_body(\n relative_scene_pose, Rt_cam2_gt\n ):\n relative_scene_pose = (\n np.linalg.inv(Rt_cam2_gt)\n @ relative_scene_pose\n @ Rt_cam2_gt\n )\n return relative_scene_pose", "def obj_ray_cast(obj, matrix):\n\n # get the ray relative to the object\n matrix_inv = matrix.inverted()\n ray_origin_obj = matrix_inv * ray_origin\n ray_target_obj = matrix_inv * ray_target\n ray_direction_obj = ray_target_obj - ray_origin_obj\n \n # cast the ray\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\n\n if success:\n return location, normal, face_index, ray_target\n else:\n return None, None, None, ray_target" ]
[ "0.69458926", "0.6836335", "0.633762", "0.62048507", "0.6087586", "0.6003151", "0.5976462", "0.5972177", "0.5914467", "0.58434325", "0.58393484", "0.58110195", "0.57184476", "0.5707809", "0.570757", "0.5687363", "0.5659299", "0.56507516", "0.5646074", "0.5632133", "0.5627521", "0.55827916", "0.55540264", "0.55407435", "0.5528845", "0.5519932", "0.5513553", "0.55134356", "0.55119985", "0.5485858" ]
0.7174655
0
Generating grid of points biased by ppa (principal point delta)
def GeneratePointsImg(self, n, ppa):
    x = np.linspace(0,self.camera.sensorSize,n)+ppa[0]
    y = np.linspace(0,self.camera.sensorSize,n)+ppa[1]
    return np.meshgrid(x, y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modify_pt(pt=None,):\n global dim\n mod_rand_pt = []\n\n for i_ in range(dim):\n for j_ in range(i_, dim):\n mod_rand_pt.append(pt[i_] * pt[j_])\n\n mod_rand_pt.append(1.)\n return mod_rand_pt", "def projection_P(P_prime):\n sorted_prime = -np.sort(-P_prime, axis=1) # Descending order sort\n cumsum_sorted = np.cumsum(sorted_prime, axis=1) # Compute cumulative sum of lines\n rho_availability = sorted_prime > (cumsum_sorted - 1) / np.arange(1, P_prime.shape[\n 1] + 1) # Compute non-zero rho candidates\n rho = np.count_nonzero(rho_availability, axis=1) # Compute number of non-zero values in final line (rho)\n theta = (cumsum_sorted[np.arange(len(rho)), rho - 1] - 1) / (rho) # Compute lagrange multiplier theta\n P = (P_prime.transpose() - theta).transpose().clip(min=0) # subtract multiplier, clip negatives\n\n return P", "def to_grid(point: np.array) -> np.array:\n return np.array((2.5, 2.5)) + point * 5", "def _generate_p(self):\n self._values, weights = zip(*self._weights.items())\n cumsum = list(itertools.accumulate(weights))\n total = cumsum[-1]\n self._p = [i / total for i in cumsum]", "def to_points(self, divisions=100):", "def beta_gen_posmnt(p):\n return np.array([0.0]*int(0.7*p) + [1.0]*(p-int(0.7*p)))", "def g(arr, n, points):\n P, a, b = arr # extract\n xCoord = P.x # extract x coord\n xCoord = bin(P.x) # get binary representation\n xCoord = \"0\" * 4 + xCoord[2:] # pad front with 0's\n ind = int(xCoord[-4:], 2) # get random point by \"hashing P\"\n Q = points[ind] # extract random point\n return P + Q[0], (a + Q[1]) % n, (b + Q[2]) % n # return the addition", "def gen_points(lo, hi, N):\n\treturn np.linspace(lo, hi, num=N)\n\t\n\t## a = np.array(range(0, N))\n\t## return lo + (a * (hi-lo)/float(N))", "def generate_points(num_points):\n for i in xrange(0, num_points):\n pass", "def _generate_pores(self):\n logger.info(\"Place randomly located pores in the domain\")\n #Original Random Point Generator\n #coords = sp.rand(self._Np,3)*[self._Lx,self._Ly,self._Lz]\n #Seeding Code\n coords = np.zeros([self._Np,3])\n #reject points close to boundaries - if False there will be slightly more\n rejection = [False,False,True]\n for j in range(3):\n i = 0\n while i < self._Np:\n coord = np.random.uniform(0,1,1)\n if self._reject(coord) == rejection[j]:\n coords[i][j]=coord\n i += 1\n coords*=np.array([self._Lx,self._Ly,self._Lz])\n #Seeding Code\n #Uniform Random Generator\n #coords = np.array([np.random.uniform(0,self._Lx,self._Np),np.random.uniform(0,self._Ly,self._Np),np.random.uniform(0,self._Lz,self._Np)]).T\n\n self['pore.coords'] = coords\n logger.debug(\"End of method\")", "def add_points(grid, num_points):\n \n for i in range(num_points):\n # Coord for crit point\n rand_x = random.randint(0, GRID_WIDTH - 1)\n rand_y = random.randint(0, GRID_HEIGHT - 1)\n \n # Set value of crit point\n elev = (MAX_HEIGHT - MIN_HEIGHT) * random.random() + MIN_HEIGHT\n grid[rand_x][rand_y] = elev * PEAK_HEIGHT\n \n return grid", "def _build_point_grid(n_per_side: int) -> np.ndarray:\n offset = 1 / (2 * n_per_side)\n points_one_side = np.linspace(offset, 1 - offset, n_per_side)\n points_x = np.tile(points_one_side[None, :], (n_per_side, 1))\n points_y = np.tile(points_one_side[:, None], (1, n_per_side))\n points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2)\n return points", "def test_PRP(initial):\n return plan_route((initial[0],initial[1]), initial[2],\n # Goals:\n [(2,3),(3,2)],\n # Allowed locations:\n [(0,0),(0,1),(0,2),(0,3),\n (1,0),(1,1),(1,2),(1,3),\n (2,0), (2,3),\n 
(3,0),(3,1),(3,2),(3,3)])", "def random_projection_split(data, indices, rng_state):\n dim = data.shape[1]\n\n # Select two random points, set the hyperplane between them\n left_index = tau_rand_int(rng_state) % indices.shape[0]\n right_index = tau_rand_int(rng_state) % indices.shape[0]\n right_index += left_index == right_index\n right_index = right_index % indices.shape[0]\n left = indices[left_index]\n right = indices[right_index]\n\n # Compute the normal vector to the hyperplane (the vector between\n # the two points) and the offset from the origin\n hyperplane_offset = 0.0\n hyperplane_vector = np.empty(dim, dtype=np.float32)\n\n for d in range(dim):\n hyperplane_vector[d] = data[left, d] - data[right, d]\n hyperplane_offset -= hyperplane_vector[d] * (\n data[left, d] + data[right, d]) / 2.0\n\n # For each point compute the margin (project into normal vector, add offset)\n # If we are on lower side of the hyperplane put in one pile, otherwise\n # put it in the other pile (if we hit hyperplane on the nose, flip a coin)\n n_left = 0\n n_right = 0\n side = np.empty(indices.shape[0], np.int8)\n for i in range(indices.shape[0]):\n margin = hyperplane_offset\n for d in range(dim):\n margin += hyperplane_vector[d] * data[indices[i], d]\n\n if margin == 0:\n side[i] = tau_rand_int(rng_state) % 2\n if side[i] == 0:\n n_left += 1\n else:\n n_right += 1\n elif margin > 0:\n side[i] = 0\n n_left += 1\n else:\n side[i] = 1\n n_right += 1\n\n # Now that we have the counts allocate arrays\n indices_left = np.empty(n_left, dtype=np.int64)\n indices_right = np.empty(n_right, dtype=np.int64)\n\n # Populate the arrays with indices according to which side they fell on\n n_left = 0\n n_right = 0\n for i in range(side.shape[0]):\n if side[i] == 0:\n indices_left[n_left] = indices[i]\n n_left += 1\n else:\n indices_right[n_right] = indices[i]\n n_right += 1\n\n return indices_left, indices_right", "def project_perp(A):\n return np.eye(A.shape[1]) - project(A)", "def points_generator(self):\n rows, cols = self.game.board.board_size\n points = [Point(i, j) for i, j in product(range(rows), range(cols))]\n for point in points:\n yield point", "def test_random_create_P():\n\n max_step = 100\n n = 50\n low = 1\n tol = 1e-8\n\n P_ι = np.random.dirichlet(np.random.randint(low, max_step, size=n))\n P_δ = np.random.dirichlet(np.random.randint(low, max_step, size=n))\n P_ζ = np.random.dirichlet(np.random.randint(low, high=max_step, size=50),\n size=2)\n\n P = create_P(P_δ, P_ζ, P_ι)\n\n assert abs(P[:, 0, :, :].sum() - 1.) < tol\n assert abs(P[:, 1, :, :].sum() - 1.) 
< tol", "def gausspp(npt):\n if npt <= 0:\n raise ValueError(\"Can't generate grid for <= 0 points\")\n return\n if npt == 1:\n xpt = np.array([0.0])\n wht = np.array([2.0])\n return xpt, wht\n\n # Each mesh is stored as a section of a big array.\n # These store its number and start index is here\n mesh_npts = [2,3,4,5,6,7,8,9,10,11,12,13,14,16,20,24,28,32,40,48,64,96]\n\n # First, look to see if the mesh is stored.\n # If not we take the largest number that is lower than that stored.\n for i in range(len(mesh_npts)):\n mesh_idx = i\n if mesh_npts[i] >= npt:\n break\n npt = mesh_npts[mesh_idx]\n n2 = int((npt+1)/2.0) # Care: Integer division!\n iof = npt\n\n # The stored grid parameters are accessed as a dict of arrays.\n x = {\n 2 : [0.577350269189626e0],\n 3 : [0.774596669241483e0, 0.0e0],\n 4 : [0.861136311594053e0, 0.339981043584856e0],\n 5 : [0.906179845938664e0, 0.538469310105683e0, 0.0e0],\n 6 : [0.932469514203152e0, 0.661209386466265e0, 0.238619186083197e0],\n 7 : [0.949107912342759e0, 0.741531185599394e0, 0.405845151377397e0, 0.0e0],\n 8 : [0.960289856497536e0, 0.796666477413627e0, 0.525532409916329e0, 0.183434642495650e0],\n 9 : [0.968160239507626e0, 0.836031107326636e0, 0.613371432700590e0, 0.324253423403809e0,\n 0.0e0],\n 10 : [0.973906528517172e0, 0.865063366688985e0, 0.679409568299024e0, 0.433395394129247e0,\n 0.148874338981631e0],\n 11 : [0.978228658146057e0, 0.887062599768095e0, 0.730152005574049e0, 0.519096129206812e0,\n 0.269543155952345e0, 0.0e0],\n 12 : [0.981560634246719e0, 0.904117256370475e0, 0.769902674194305e0, 0.587317954286617e0,\n 0.367831498998180e0, 0.125233408511469e0],\n 13 : [0.984183054718588e0, 0.917598399222978e0, 0.801578090733310e0, 0.642349339440340e0,\n 0.448492751036447e0, 0.230458315955135e0, 0.0e0],\n 14 : [0.986283808696812e0, 0.928434883663574e0, 0.827201315069765e0, 0.687292904811685e0,\n 0.515248636358154e0, 0.319112368927890e0, 0.108054948707344e0],\n 16 : [0.989400934991650e0, 0.944575023073232e0, 0.865631202387832e0, 0.755404408355003e0,\n 0.617876244402644e0, 0.458016777657227e0, 0.281603550779259e0, 0.950125098376369e-1],\n 20 : [0.993128599185095e0, 0.963971927277914e0, 0.912234428251326e0, 0.839116971822219e0,\n 0.746331906460151e0, 0.636053680726515e0, 0.510867001950827e0, 0.373706088715419e0,\n 0.227785851141645e0, 0.765265211334969e-1],\n 24 : [0.995187219997021e0, 0.974728555971309e0, 0.938274552002733e0, 0.886415527004401e0,\n 0.820001985973903e0, 0.740124191578554e0, 0.648093651936975e0, 0.545421471388839e0,\n 0.433793507626045e0, 0.315042679696163e0, 0.191118867473616e0, 0.640568928626059e-1],\n 28 : [0.996442497573954e0, 0.981303165370873e0, 0.954259280628938e0, 0.915633026392132e0,\n 0.865892522574395e0, 0.805641370917179e0, 0.735610878013632e0, 0.656651094038865e0,\n 0.569720471811402e0, 0.475874224955118e0, 0.376251516089079e0, 0.272061627635178e0,\n 0.164569282133381e0, 0.550792898840340e-1],\n 32 : [0.997263861849481e0, 0.985611511545268e0, 0.964762255587506e0, 0.934906075937740e0,\n 0.896321155766052e0, 0.849367613732570e0, 0.794483795967942e0, 0.732182118740290e0,\n 0.663044266930215e0, 0.587715757240762e0, 0.506899908932229e0, 0.421351276130635e0,\n 0.331868602282128e0, 0.239287362252137e0, 0.144471961582796e0, 0.483076656877380e-1],\n 40 : [0.998237709710559e0, 0.990726238699457e0, 0.977259949983774e0, 0.957916819213792e0,\n 0.932812808278676e0, 0.902098806968874e0, 0.865959503212259e0, 0.824612230833312e0,\n 0.778305651426519e0, 0.727318255189927e0, 0.671956684614179e0, 0.612553889667980e0,\n 0.549467125095128e0, 
0.483075801686179e0, 0.413779204371605e0, 0.341994090825758e0,\n 0.268152185007254e0, 0.192697580701371e0, 0.116084070675255e0, 0.387724175060510e-1],\n 48 : [0.998771007252426e0, 0.993530172266351e0, 0.984124583722827e0, 0.970591592546247e0,\n 0.952987703160431e0, 0.931386690706554e0, 0.905879136715570e0, 0.876572020274248e0,\n 0.843588261624393e0, 0.807066204029443e0, 0.767159032515740e0, 0.724034130923815e0,\n 0.677872379632664e0, 0.628867396776514e0, 0.577224726083973e0, 0.523160974722233e0,\n 0.466902904750958e0, 0.408686481990717e0, 0.348755886292161e0, 0.287362487355455e0,\n 0.224763790394689e0, 0.161222356068892e0, 0.970046992094629e-1, 0.323801709628690e-1],\n 64 : [0.999305041735772e0, 0.996340116771955e0, 0.991013371476744e0, 0.983336253884626e0,\n 0.973326827789911e0, 0.961008799652054e0, 0.946411374858403e0, 0.929569172131939e0,\n 0.910522137078503e0, 0.889315445995114e0, 0.865999398154093e0, 0.840629296252580e0,\n 0.813265315122797e0, 0.783972358943341e0, 0.752819907260532e0, 0.719881850171611e0,\n 0.685236313054233e0, 0.648965471254657e0, 0.611155355172393e0, 0.571895646202634e0,\n 0.531279464019894e0, 0.489403145707053e0, 0.446366017253464e0, 0.402270157963992e0,\n 0.357220158337668e0, 0.311322871990211e0, 0.264687162208767e0, 0.217423643740007e0,\n 0.169644420423993e0, 0.121462819296120e0, 0.729931217877989e-1, 0.243502926634240e-1],\n 96 : [0.999689503883230e0, 0.998364375863181e0, 0.995981842987209e0, 0.992543900323762e0,\n 0.988054126329623e0, 0.982517263563014e0, 0.975939174585136e0, 0.968326828463264e0,\n 0.959688291448742e0, 0.950032717784437e0, 0.939370339752755e0, 0.927712456722308e0,\n 0.915071423120898e0, 0.901460635315852e0, 0.886894517402420e0, 0.871388505909296e0,\n 0.854959033434601e0, 0.837623511228187e0, 0.819400310737931e0, 0.800308744139140e0,\n 0.780369043867433e0, 0.759602341176647e0, 0.738030643744400e0, 0.715676812348967e0,\n 0.692564536642171e0, 0.668718310043916e0, 0.644163403784967e0, 0.618925840125468e0,\n 0.593032364777572e0, 0.566510418561397e0, 0.539388108324357e0, 0.511694177154667e0,\n 0.483457973920596e0, 0.454709422167743e0, 0.425478988407300e0, 0.395797649828908e0,\n 0.365696861472313e0, 0.335208522892625e0, 0.304364944354496e0, 0.273198812591049e0,\n 0.241743156163840e0, 0.210031310460567e0, 0.178096882367618e0, 0.145973714654896e0,\n 0.113695850110665e0, 0.812974954644249e-1, 0.488129851360490e-1, 0.162767448496020e-1]\n }\n wt = {\n 2 : [0.999999999999999e0],\n 3 : [0.555555555555556e0, 0.888888888888889e0],\n 4 : [0.347854845137454e0, 0.652145154862546e0],\n 5 : [0.236926885056189e0, 0.478628670499366e0, 0.568888888888889e0],\n 6 : [0.171324492379170e0, 0.360761573048139e0, 0.467913934572691e0],\n 7 : [0.129484966168870e0, 0.279705391489277e0, 0.381830050505119e0, 0.417959183673469e0],\n 8 : [0.101228536290376e0, 0.222381034453374e0, 0.313706645877887e0, 0.362683783378362e0],\n 9 : [0.812743883615739e-1, 0.180648160694857e0, 0.260610696402935e0, 0.312347077040003e0,\n 0.330239355001260e0],\n 10 : [0.666713443086879e-1, 0.149451349150581e0, 0.219086362515982e0, 0.269266719309996e0,\n 0.295524224714753e0],\n 11 : [0.556685671161740e-1, 0.125580369464905e0, 0.186290210927734e0, 0.233193764591990e0,\n 0.262804544510247e0, 0.272925086777901e0],\n 12 : [0.471753363865120e-1, 0.106939325995318e0, 0.160078328543346e0, 0.203167426723066e0,\n 0.233492536538355e0, 0.249147045813403e0],\n 13 : [0.404840047653160e-1, 0.921214998377279e-1, 0.138873510219787e0, 0.178145980761946e0,\n 0.207816047536889e0, 0.226283180262897e0, 0.232551553230874e0],\n 
14 : [0.351194603317520e-1, 0.801580871597599e-1, 0.121518570687903e0, 0.157203167158194e0,\n 0.185538397477938e0, 0.205198463721296e0, 0.215263853463158e0],\n 16 : [0.271524594117540e-1, 0.622535239386480e-1, 0.951585116824929e-1, 0.124628971255534e0,\n 0.149595988816577e0, 0.169156519395002e0, 0.182603415044923e0, 0.189450610455068e0],\n 20 : [0.176140071391520e-1, 0.406014298003870e-1, 0.626720483341089e-1, 0.832767415767049e-1,\n 0.101930119817240e0, 0.118194531961518e0, 0.131688638449177e0, 0.142096109318382e0,\n 0.149172986472604e0, 0.152753387130726e0],\n 24 : [0.123412297999870e-1, 0.285313886289340e-1, 0.442774388174200e-1, 0.592985849154370e-1,\n 0.733464814110799e-1, 0.861901615319529e-1, 0.976186521041139e-1, 0.107444270115966e0,\n 0.115505668053726e0, 0.121670472927803e0, 0.125837456346828e0, 0.127938195346752e0],\n 28 : [0.912428259309400e-2, 0.211321125927710e-1, 0.329014277823040e-1, 0.442729347590040e-1,\n 0.551073456757170e-1, 0.652729239669989e-1, 0.746462142345689e-1, 0.831134172289009e-1,\n 0.905717443930329e-1, 0.969306579979299e-1, 0.102112967578061e0, 0.106055765922846e0,\n 0.108711192258294e0, 0.110047013016475e0],\n 32 : [0.701861000947000e-2, 0.162743947309060e-1, 0.253920653092620e-1, 0.342738629130210e-1,\n 0.428358980222270e-1, 0.509980592623760e-1, 0.586840934785350e-1, 0.658222227763619e-1,\n 0.723457941088479e-1, 0.781938957870699e-1, 0.833119242269469e-1, 0.876520930044039e-1,\n 0.911738786957639e-1, 0.938443990808039e-1, 0.956387200792749e-1, 0.965400885147279e-1],\n 40 : [0.452127709853300e-2, 0.104982845311530e-1, 0.164210583819080e-1, 0.222458491941670e-1,\n 0.279370069800230e-1, 0.334601952825480e-1, 0.387821679744720e-1, 0.438709081856730e-1,\n 0.486958076350720e-1, 0.532278469839370e-1, 0.574397690993910e-1, 0.613062424929290e-1,\n 0.648040134566009e-1, 0.679120458152339e-1, 0.706116473912869e-1, 0.728865823958039e-1,\n 0.747231690579679e-1, 0.761103619006259e-1, 0.770398181642479e-1, 0.775059479784249e-1],\n 48 : [0.315334605230600e-2, 0.732755390127600e-2, 0.114772345792340e-1, 0.155793157229440e-1,\n 0.196161604573550e-1, 0.235707608393240e-1, 0.274265097083570e-1, 0.311672278327980e-1,\n 0.347772225647700e-1, 0.382413510658310e-1, 0.415450829434650e-1, 0.446745608566940e-1,\n 0.476166584924900e-1, 0.503590355538540e-1, 0.528901894851940e-1, 0.551995036999840e-1,\n 0.572772921004030e-1, 0.591148396983960e-1, 0.607044391658940e-1, 0.620394231598930e-1,\n 0.631141922862539e-1, 0.639242385846479e-1, 0.644661644359499e-1, 0.647376968126839e-1],\n 64 : [0.178328072169600e-2, 0.414703326056200e-2, 0.650445796897800e-2, 0.884675982636400e-2,\n 0.111681394601310e-1, 0.134630478967190e-1, 0.157260304760250e-1, 0.179517157756970e-1,\n 0.201348231535300e-1, 0.222701738083830e-1, 0.243527025687110e-1, 0.263774697150550e-1,\n 0.283396726142590e-1, 0.302346570724020e-1, 0.320579283548510e-1, 0.338051618371420e-1,\n 0.354722132568820e-1, 0.370551285402400e-1, 0.385501531786160e-1, 0.399537411327200e-1,\n 0.412625632426230e-1, 0.424735151236530e-1, 0.435837245293230e-1, 0.445905581637560e-1,\n 0.454916279274180e-1, 0.462847965813140e-1, 0.469681828162100e-1, 0.475401657148300e-1,\n 0.479993885964580e-1, 0.483447622348030e-1, 0.485754674415030e-1, 0.486909570091400e-1],\n 96 : [0.796792065552010e-3, 0.185396078894692e-2, 0.291073181793495e-2, 0.396455433844469e-2,\n 0.501420274292752e-2, 0.605854550423596e-2, 0.709647079115386e-2, 0.812687692569876e-2,\n 0.914867123078339e-2, 0.101607705350080e-1, 0.111621020998380e-1, 0.121516046710880e-1,\n 
0.131282295669610e-1, 0.140909417723140e-1, 0.150387210269940e-1, 0.159705629025620e-1,\n 0.168854798642450e-1, 0.177825023160450e-1, 0.186606796274110e-1, 0.195190811401450e-1,\n 0.203567971543330e-1, 0.211729398921910e-1, 0.219666444387440e-1, 0.227370696583290e-1,\n 0.234833990859260e-1, 0.242048417923640e-1, 0.249006332224830e-1, 0.255700360053490e-1,\n 0.262123407356720e-1, 0.268268667255910e-1, 0.274129627260290e-1, 0.279700076168480e-1,\n 0.284974110650850e-1, 0.289946141505550e-1, 0.294610899581670e-1, 0.298963441363280e-1,\n 0.302999154208270e-1, 0.306713761236690e-1, 0.310103325863130e-1, 0.313164255968610e-1,\n 0.315893307707270e-1, 0.318287588944110e-1, 0.320344562319920e-1, 0.322062047940300e-1,\n 0.323438225685750e-1, 0.324471637140640e-1, 0.325161187138680e-1, 0.325506144923630e-1]\n }\n\n # Now calculate the grid and weighting from these data chosen by npt\n\n mesh_r = x[npt]\n mesh_wt = wt[npt]\n\n r = np.zeros((2*n2))\n weight = np.zeros((2*n2))\n\n for i in range(n2):\n r[i] = -mesh_r[i]\n r[iof - (i + 1)] = mesh_r[i]\n weight[i] = mesh_wt[i]\n weight[iof - (i + 1)] = mesh_wt[i]\n\n return npt, r, weight", "def point_to_ppm(point, procs, proc2s):\n \n # It seems that F1 is related to the Y axis, while F2 is related to the X axis\n \n begin = (float(proc2s[\"OFFSET\"]), float(procs[\"OFFSET\"]))\n # End is begin-sw_p/sf, so step is (end-begin)/si, which simplifies to\n # (-sw_p/sf+1)/si\n step = [(-float(p[\"SW_p\"])/float(p[\"SF\"]))/float(p[\"SI\"]) \n for p in [proc2s, procs] ]\n \n return [begin[i]+step[i]*point[i] for i in (0,1)]", "def create_points(self):\n v1 = 0.0\n v2 = 0.5\n v3 = 0.25\n v4 = 0.2 # only used for hexgrid\n\n points = []\n\n points.append((v1, v1, v1)) # 0\n points.append((v2, v1, v1)) # 1\n points.append((v2, v2, v1)) # 2\n points.append((v1, v2, v1)) # 3\n\n points.append((v1, v1, v2)) # 4\n points.append((v2, v1, v2)) # 5\n points.append((v2, v2, v2)) # 6\n points.append((v1, v2, v2)) # 7\n\n points.append((v3, v1, v1)) # 8\n points.append((v2, v3, v1)) # 9\n points.append((v3, v2, v1)) # 10\n points.append((v1, v3, v1)) # 11\n\n points.append((v1, v1, v3)) # 12\n points.append((v2, v1, v3)) # 13\n points.append((v2, v2, v3)) # 14\n points.append((v1, v2, v3)) # 15\n\n points.append((v3, v1, v2)) # 16\n points.append((v2, v3, v2)) # 17\n points.append((v3, v2, v2)) # 18\n points.append((v1, v3, v2)) # 19\n\n points.append((v4, v1, v1)) # 20\n points.append((v1, v4, v1)) # 21\n points.append((v1, v1, v4)) # 22\n\n return points", "def PCA_gen(pos, k = 6, self_loop = False):\n\n # Use PCA to find principle component projection\n p_components = PCA(n_components = 1).fit_transform(pos)\n\n a_idxs = neighbors(p_components, self_loop, k)\n ones = np.ones(size = a_idxs.shape[0])\n\n a = csr_matrix(ones, (a_idxs[:,0], a_idxs[:, 1]))\n\n return a", "def projective_point(p):\n from sage.rings.integer import GCD_list, LCM_list\n try:\n p_gcd = GCD_list([x.numerator() for x in p])\n p_lcm = LCM_list([x.denominator() for x in p])\n except AttributeError:\n return p\n scale = p_lcm / p_gcd\n return [scale * x for x in p]", "def guassian_point_process(x0, y0, xSigma, ySigma, nPoints):\n x = np.random.normal(loc=x0, scale=xSigma, size=(nPoints,))\n y = np.random.normal(loc=y0, scale=ySigma, size=(nPoints,))\n return x, y", "def get_interpolation_points(n_interpolation_points, grid, seed):\n np.random.seed(seed)\n\n grid_min = np.array(object=[min(v) for _, v in grid.items()])\n grid_max = np.array(object=[max(v) for _, v in grid.items()])\n\n points = []\n\n 
for _ in range(n_interpolation_points):\n tmp = np.random.uniform(0.0, 1.0, len(grid_min))\n points.append(tmp)\n\n interpolation_points = np.array(\n object=(\n points * grid_min\n + (np.ones((n_interpolation_points, len(grid_min))) - points) * grid_max\n ),\n dtype=float,\n )\n\n return interpolation_points", "def generar_polinomio(self):\n\t\tself.poli = 0\n\t\tfor i in range(len(self.v)):\n\t\t\tpoli2 = n(self.diferencias_divididas(self.v[0:i+1]))\n\t\t\tfor j in range(i):\n\t\t\t\tpoli2 *= self.x-self.v[j][0]\n\t\t\tself.poli = self.poli + poli2", "def sample_all_planck_points(all_ids, adaptivep0 = True, planck_tqu_cursor = None, planck_cov_cursor = None, region = \"SC_241\", verbose = False, tol=1E-5, sampletype = \"mean_bayes\", testproj=False):\n if testproj:\n all_naive_p = np.zeros(len(all_ids))\n all_naive_psi = np.zeros(len(all_ids))\n else:\n all_pMB = np.zeros(len(all_ids))\n all_psiMB = np.zeros(len(all_ids))\n\n if planck_tqu_cursor is None:\n print(\"Loading default planck_tqu_cursor because it was not provided\")\n planck_tqu_db = sqlite3.connect(\"planck_TQU_gal_2048_db.sqlite\")\n planck_tqu_cursor = planck_tqu_db.cursor()\n \n if planck_cov_cursor is None:\n print(\"Loading default planck_cov_cursor because it was not provided\")\n planck_cov_db = sqlite3.connect(\"planck_cov_gal_2048_db.sqlite\")\n planck_cov_cursor = planck_cov_db.cursor()\n\n # Get p0 and psi0 sampling grids\n p0_all = np.linspace(0, 1, 165)\n psi0_all = np.linspace(0, np.pi, 165, endpoint=False) # don't count both 0 and pi\n\n update_progress(0.0)\n for i, _id in enumerate(all_ids):\n #if _id[0] in [3400757, 793551, 2447655]:\n posterior_obj = PlanckPosterior(_id[0], planck_tqu_cursor, planck_cov_cursor, p0_all, psi0_all, adaptivep0 = adaptivep0)\n #print(\"for id {}, p0 grid is {}\".format(_id, posterior_obj.sample_p0))\n #print(\"for id {}, pmeas is {}, psimeas is {}, psi naive is {}\".format(_id, posterior_obj.pmeas, posterior_obj.psimeas, posterior_obj.naive_psi))\n #print(\"for id {}, likelihood[0, 1] = {}\".format(_id, posterior_obj.posterior[0, 1]))\n #print(p0_all[0], psi0_all[1]) \n #lnlikeout = lnlikelihood(_id[0], planck_tqu_cursor, planck_cov_cursor, p0_all[0], psi0_all[1])\n #print(\"for id {}, lnlikelihood[0, 1] = {}\".format(_id, lnlikeout[0]))\n #print(np.exp(lnlikeout[0]))\n \n if testproj:\n all_naive_p[i] = posterior_obj.pmeas\n all_naive_psi[i] = posterior_obj.psimeas \n else:\n if sampletype is \"mean_bayes\":\n all_pMB[i], all_psiMB[i] = mean_bayesian_posterior(posterior_obj, center = \"naive\", verbose = verbose, tol=tol)\n elif sampletype is \"MAP\":\n all_pMB[i], all_psiMB[i] = maximum_a_posteriori(posterior_obj, verbose = verbose)\n if verbose is True:\n print(\"for id {}, num {}, I get pMB {} and psiMB {}\".format(_id, i, all_pMB[i], all_psiMB[i]))\n\n update_progress((i+1.0)/len(all_ids), message='Sampling: ', final_message='Finished Sampling: ')\n \n if testproj:\n return all_naive_p, all_naive_psi\n else:\n return all_pMB, all_psiMB", "def generate_regular_grid_point_coords(R, side_size, device):\n aff = torch.tensor([[[0.5, 0, 0.5], [0, 0.5, 0.5]]], device=device)\n r = F.affine_grid(aff, torch.Size((1, 1, side_size, side_size)), align_corners=False)\n return r.view(1, -1, 2).expand(R, -1, -1)", "def percolation_vs_p(w: int, h: int, nsim=40, n_p=50):\n p_values = np.linspace(0., 1., n_p) # n_p-value array between 0 and 1\n\n def plot_crossing_probability(ax, Percolation) -> np.ndarray:\n \"\"\"\n Plot crossing probabilities of a percolation of type Percolation\n 
\"\"\"\n\n print(f\"Computing crossing probabilities for {Percolation.grid_type} \"\n \"percolation\")\n cross_proba = np.zeros_like(p_values)\n for i in progressbar.progressbar(range(nsim)):\n perco = Percolation(w, h)\n p_cross = perco.find_p_cross()\n cross_proba += np.where(p_values < p_cross, 0, 1)\n\n cross_proba /= nsim\n ax.plot(p_values, cross_proba, '-',\n label=f'{Percolation.grid_type} percolation')\n\n fig, ax = plt.subplots()\n fig.suptitle('Probability of crossing as a function of $p$')\n ax.set_xlabel('$p$')\n ax.set_ylabel('probability')\n ax.grid()\n plot_crossing_probability(ax, PercolationRect)\n plot_crossing_probability(ax, PercolationHex)\n ax.legend()\n ax.set_title(f\"{nsim} simulations on a {w} x {h} grid\", fontsize=10)", "def generate_points(octrees, pyramids, exsum):\n return _C.ops.spc.GeneratePoints(octrees.contiguous(),\n pyramids.contiguous(),\n exsum.contiguous())", "def pareto_distribution(v, p=0.8):\n thr = np.sum(v)*p\n cumsum = 0\n for i, _v in enumerate(v, 1):\n cumsum += _v\n if cumsum >= thr:\n return i * 1.0 / len(v)" ]
[ "0.60559386", "0.59712946", "0.59704006", "0.595379", "0.5917291", "0.58962584", "0.58279955", "0.582432", "0.5792295", "0.57677454", "0.5709391", "0.5696328", "0.56858194", "0.56748235", "0.5633881", "0.5615407", "0.56149656", "0.560475", "0.560267", "0.5601162", "0.5589663", "0.5574519", "0.5569534", "0.55672973", "0.5561524", "0.5556496", "0.5550597", "0.55192643", "0.5518998", "0.551796" ]
0.6255561
0
Description
When is given a directory name that exist
Expected Result
Shows log that directory was found
def test_has_directory_log(self, check_fn_true, caplog):
    #setup
    records = caplog.records
    has_directory = extractor.make_has_directory(os.path.isdir)
    directory_path = "./data/observed"
    #when
    test1 = has_directory(directory_path)
    #result
    assert len(records) == 1
    assert records[0].message == f"It was found directory {directory_path}"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_doesnt_have_directory_log(self, check_fn_false, caplog):\n\n #setup\n records = caplog.records\n has_directory = extractor.make_has_directory(os.path.isdir)\n directory_path = \"./data/tests\"\n \n #when\n test2 = has_directory(directory_path)\n\n #result\n assert len(records) == 1\n assert records[0].message == f\"It wasn't found directory {directory_path}\"", "def checkDirectory(path,logger):\n newPath = completePath(path)\n if not os.path.exists(newPath):\n os.makedirs(newPath)\n if (logger):\n print(\"Did not found required directories. Creating them...\")\n else:\n if (logger):\n print(\"Found the required directories!\")", "def test_ensure_dir_exists(self):\n pass", "def checkDir(directory):\n ## test if directory is there\n if not os.path.exists(directory):\n os.mkdir(directory)\n sys.out = open(directory + '/' + str(time.time()) + '.log', 'w')\n print(\"Making new directory: \" + directory + \"\\n\")\n else:\n sys.out = open(directory + '/' + str(time.time()) + '.log', 'w')\n print(\"Found directory: \" + directory + \"\\n\")", "def check_if_dir_exists():\n if not os.path.exists(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"logs\"):\n try:\n os.mkdir(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"logs\")\n logger.debug(\"Dir for logs has been created\")\n except OSError:\n logger.debug(f\"Creation of the directory {str(__CURRENT_DIRECTORY) + os.sep + '..' + os.sep + 'logs'} failed\")\n\n if not os.path.exists(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"db\"):\n try:\n os.mkdir(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"db\")\n logger.debug(\"Dir for DB has been created\")\n except OSError:\n logger.debug(f\"Creation of the directory {str(__CURRENT_DIRECTORY) + os.sep + '..' + os.sep + 'db'} failed\")", "def check_dir(directory: str, err_string: str) -> None:\n if not pathlib.Path(directory).is_dir():\n print('\\n' + err_string + '\\n')\n raise NotADirectoryError", "def test_log_dir(self):\n false_dir = '/tmp/any'\n self.test_config['LOG_DIR'] = false_dir\n self.write_config_to_file()\n self.log = nginx_log_generator()\n self.generate_report()\n # Check our log\n path_to_log = '{}/assets/{}'.format(self.test_dir, 'analyzer.log')\n with open(path_to_log) as f:\n log_content = f.read()\n self.assertTrue(\"Sorry, directory {} wasn't found\".format(false_dir) in log_content)", "def print_is_directory(dir_name):\n print('pwgrep: {}: is a directory'.format(dir_name))", "def test_list_directory(self):\n import os\n stat_f = lambda x: FakeStat(33188, 16398844, 65024L, 1, 1049, 1049, 0,\n 1409046988, 1409046988, 1409046988)\n os.stat = stat_f\n os.lstat = stat_f\n expected = [\"subdir1\", \"subdir2\"]\n result = [r[0] for r in self.path_translator.list_directory(\n \"/{0}/search1/tmp/study\".format(self.search.instance))]\n self.assertEqual(result, expected)", "def check_dir(dirname):\n print('Checking directory...{}'.format(dirname))\n if dirname is not None and not is_dir(dirname):\n raise FileNotFoundError('{} is not a valid directory'.format(dirname))", "def check_dir(dname):\n direc = os.path.dirname(dname)\n try:\n os.stat(direc)\n except:\n os.mkdir(direc)\n print \"Made directory %s....\" % dname\n return dname", "def Directory(self) -> str:", "def check_dir_exist():\n if os.path.isdir(path_structure):\n return True\n else:\n return False", "def test_has_directory(self, check_fn_true):\n\n #setup\n has_directory = extractor.make_has_directory(os.path.isdir)\n \n #when\n test1 = has_directory(\"./data/observed\")\n\n #result\n 
assert test1 is True", "def test_is_summary_directory(self):\n summary_base_dir = tempfile.mkdtemp()\n file_count = 1\n directory_count = 1\n gen_directories_and_files(summary_base_dir, file_count, directory_count)\n\n summary_watcher = SummaryWatcher()\n flag = summary_watcher.is_summary_directory(summary_base_dir, './')\n assert flag\n flag = summary_watcher.is_summary_directory(summary_base_dir, './\\x00')\n assert not flag\n shutil.rmtree(summary_base_dir)", "def check_dir(self):\n\n if not os.path.isdir(self.directory):\n os.mkdir(self.directory, 755)\n if not os.path.exists(self.log_file):\n from pathlib import Path\n Path(self.log_file).touch()", "def test_doesnt_have_directory(self, check_fn_false):\n\n # setup\n has_directory = extractor.make_has_directory(os.path.isdir)\n\n # when\n test2 = has_directory(\"./data/tests\")\n\n # result\n assert test2 is False", "def dir_filter(x):\n return os.path.isdir('logs/{}'.format(x))", "def dir_exists(self, path):\n if not path:\n return True\n return False", "def test_test_directory_identifer_exists(self):\n self.logger.info(\"STEP: Initialize the workspace.\")\n with Workspace(Mock()) as workspace:\n self.workspace = workspace\n\n self.logger.info(\n \"STEP: Enter a test directory in a context manager with identifier \"\n \"'dir1'.\"\n )\n with workspace.test_directory(\"dir1\") as directory:\n self.logger.info(\n \"STEP: Check that test directory was created and exit the \"\n \"context.\"\n )\n if not directory.exists() and directory.is_dir():\n raise Exception(\"Test directory was not properly created.\")\n first_stat = directory.stat()\n\n with workspace.test_directory(\"dir1\") as directory:\n self.logger.info(\n \"STEP: Enter a test directory in a context manager with the \"\n \"same identifer.\"\n )\n if not directory.exists() and directory.is_dir():\n raise Exception(\"Test directory was not properly created.\")\n\n self.logger.info(\"STEP: Verify that the folder was re-used.\")\n self.assertEqual(\n first_stat,\n directory.stat(),\n \"Second directory is not the same as the first directory.\",\n )", "def check_dir_exist(scheme):\n if os.path.exists(scheme.prefix) is False:\n print(\"ERROR: Required directory '{}' is missing! Exiting!\").format(scheme.prefix)\n sys.exit(1)", "def testNoSuchDirectory(self):\n\n self.assertRaises(OSError,\n parse_package,\n \"no_such_directory\")", "def check_dir(filedir, olddir): # Yasemin's code\r\n\tgoodname = False\r\n\twhile goodname == False:\r\n\t\tif exists(filedir + olddir):\r\n\t\t\tprint(\"Directory already exists! 
Please pick a knew directory name for old lab files:\")\r\n\t\t\tolddir = input(\"> \")\r\n\t\t\tolddir = name_check(olddir)\r\n\t\telse:\r\n\t\t\tgoodname = True\r\n\treturn olddir", "def is_dir(self, path):", "def is_valid_directory(parser, arg):", "def scan_sample_directory(sample_dir: Path) -> None:\n if not (sample_dir / 'README.md').is_file():\n print(f\"WARNING ({sample_dir}): No README.md file\")\n if not (sample_dir / 'main.py').is_file():\n print(f\"ERROR ({sample_dir}): No main.py file\")", "def _check_directories(self, dist, component):\n path = join(self.repository, 'dists', dist, component, 'source')\n\n if not isdir(path):\n makedirs(path)", "def test_error_is_thrown_if_directory_does_not_exist(fs):\n\n output_dir = 'user_specified_directory'\n assert not os.path.exists(output_dir)\n\n with pytest.raises(NotADirectoryError) as exception_info:\n verify_output_dir(output_dir)\n\n assert exception_info.value.args[0] == 'The \"user_specified_directory\" directory, which was specified by ' \\\n 'the --output-dir command-line argument, is not an existing directory. ' \\\n 'Please either create that directory or specify a different one.'", "def check_dir(dir):\n if not os.path.exists(dir):\n print(\"[+] Creating directory for target..\")\n os.makedirs(dir)", "def _existDir(d):\n\treturn os.path.exists(d)" ]
[ "0.74612963", "0.66516936", "0.6648649", "0.6637668", "0.6592905", "0.65451527", "0.6507091", "0.6476768", "0.6445445", "0.64397144", "0.641731", "0.6323137", "0.6317305", "0.6291415", "0.6287829", "0.6264551", "0.62638617", "0.62398607", "0.6232754", "0.6224753", "0.62079227", "0.62067014", "0.62065524", "0.61675984", "0.6151805", "0.61463827", "0.6136915", "0.6132248", "0.61214143", "0.6103149" ]
0.7359447
1
Description
When is given a directory name that doesnt exist
Expected Result
returns False
def test_doesnt_have_directory(self, check_fn_false):
    # setup
    has_directory = extractor.make_has_directory(os.path.isdir)
    # when
    test2 = has_directory("./data/tests")
    # result
    assert test2 is False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_dir_exist():\n if os.path.isdir(path_structure):\n return True\n else:\n return False", "def __is_directory_name(filename):\n return filename[-1] == '/'", "def is_valid_directory(args):\n if args.directory is not None:\n return True\n return False", "def is_directory(path_name):\n if not is_file(path_name):\n return True\n else:\n return False", "def is_valid_directory(parser, arg):", "def dir_exists(self, path):\n if not path:\n return True\n return False", "def test_doesnt_have_directory_log(self, check_fn_false, caplog):\n\n #setup\n records = caplog.records\n has_directory = extractor.make_has_directory(os.path.isdir)\n directory_path = \"./data/tests\"\n \n #when\n test2 = has_directory(directory_path)\n\n #result\n assert len(records) == 1\n assert records[0].message == f\"It wasn't found directory {directory_path}\"", "def is_dir(self, path):", "def testIsDir(self):\r\n P=lambda p:ufsi.NativeUnixPath(p)\r\n existingDirPath=P(self.existingDirPathStr)\r\n nonExistingDirPath=P(self.nonExistingDirPathStr)\r\n existingDirNoTrailingSlashPath=P(self.existingDirPathStr[:-1])\r\n existingValidDirSymlinkPath=P(self.existingValidSymlinkDirPathStr)\r\n existingInvalidDirSymlinkPath=P(self.existingInvalidSymlinkDirPathStr)\r\n existingFilePath=P(self.existingFilePathStr)\r\n\r\n # 1\r\n self.assertEquals(existingDirPath.isDir(),True,\r\n '%r is a dir'%str(existingDirPath))\r\n\r\n # 2\r\n self.assertEquals(nonExistingDirPath.isDir(),False,\r\n '%r does not exist'%str(nonExistingDirPath))\r\n\r\n # 3\r\n self.assertEquals(existingDirNoTrailingSlashPath.isDir(),True,\r\n '%r is a dir'%str(existingDirNoTrailingSlashPath))\r\n\r\n # 4\r\n self.assertEquals(existingValidDirSymlinkPath.isDir(),True,\r\n '%r is a dir'%str(existingValidDirSymlinkPath))\r\n\r\n # 5\r\n self.assertEquals(existingInvalidDirSymlinkPath.isDir(),False,\r\n '%r is an invalid symlink'\r\n %str(existingInvalidDirSymlinkPath))\r\n\r\n # 6\r\n self.assertEquals(existingFilePath.isDir(),False,\r\n '%r is a file'%str(existingFilePath))", "def check_dir(filedir, olddir): # Yasemin's code\r\n\tgoodname = False\r\n\twhile goodname == False:\r\n\t\tif exists(filedir + olddir):\r\n\t\t\tprint(\"Directory already exists! 
Please pick a knew directory name for old lab files:\")\r\n\t\t\tolddir = input(\"> \")\r\n\t\t\tolddir = name_check(olddir)\r\n\t\telse:\r\n\t\t\tgoodname = True\r\n\treturn olddir", "def test_ensure_dir_exists(self):\n pass", "def NotADirectoryError(inst):\n return hasattr(inst, 'errno') and inst.errno == errno.ENOTDIR", "def _is_directory(input_data) -> bool:\n # TODO(cezequiel): Implement in phase 2.\n _ = input_data\n return False", "def _existDir(d):\n\treturn os.path.exists(d)", "def check_dir(dname):\n direc = os.path.dirname(dname)\n try:\n os.stat(direc)\n except:\n os.mkdir(direc)\n print \"Made directory %s....\" % dname\n return dname", "def isDir(self, fname):\n\t\tif fname in self.getAllDirs():\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def empty_dir(value):\n return not os.listdir(value)", "def validate(self):\n if not self.path.is_dir() or not self.path.exists():\n raise NotADirectoryError", "def check_dir(dirname):\n print('Checking directory...{}'.format(dirname))\n if dirname is not None and not is_dir(dirname):\n raise FileNotFoundError('{} is not a valid directory'.format(dirname))", "def check_dir(directory: str, err_string: str) -> None:\n if not pathlib.Path(directory).is_dir():\n print('\\n' + err_string + '\\n')\n raise NotADirectoryError", "def directory_exists(self, directory: str = None) -> bool:\n return os.access(directory if directory else self.get_directory(), os.R_OK)", "def check_directory(self, directory: str) -> bool:\n return self.run(\"/\", \"root\", [\"test\", \"-d\", directory], check=False).returncode == 0", "def check_is_directory(val, name):\n check_path_exists(val, name)\n if not os.path.isdir(val):\n raise ValueError(name + ' of value ' + val + '\" is not a legal directory.')", "def check_dir_exist(scheme):\n if os.path.exists(scheme.prefix) is False:\n print(\"ERROR: Required directory '{}' is missing! Exiting!\").format(scheme.prefix)\n sys.exit(1)", "def check_directory_valid(self):\n Util.print_standout(\"check is there haven`t empty directory.\")\n for p, dirs, filename_list in os.walk(self.data_dir):\n for dir_name in dirs:\n if not os.listdir(os.path.join(p, dir_name)):\n Util.print_error(\"There shouldn't be a empty directory in [%s] of [%s]\" % (dir_name, self.data_dir))\n return False\n return True", "def test_has_directory(self, check_fn_true):\n\n #setup\n has_directory = extractor.make_has_directory(os.path.isdir)\n \n #when\n test1 = has_directory(\"./data/observed\")\n\n #result\n assert test1 is True", "def IsADirectoryError(inst):\n return hasattr(inst, 'errno') and inst.errno == errno.EISDIR", "def checkDir(dirName=None):\r\n if not os.path.exists(dirName):\r\n os.makedirs(dirName)\r\n return 0", "def isdir (self, path):\r\n pass", "def is_dir(self, path: PathLike):" ]
[ "0.7637506", "0.7587286", "0.75317335", "0.7509946", "0.7493378", "0.74204826", "0.74024594", "0.7333082", "0.7327076", "0.7321167", "0.73203564", "0.72724897", "0.7250266", "0.7245775", "0.7242602", "0.7218288", "0.7178371", "0.71603537", "0.71599776", "0.714316", "0.7106386", "0.71054476", "0.70886093", "0.7052941", "0.7050756", "0.7017541", "0.70102763", "0.7010186", "0.7003531", "0.6987885" ]
0.7918296
0
Description
When is given a directory name that doesnt exist
Expected Result
Shows log that directory wasn't found
def test_doesnt_have_directory_log(self, check_fn_false, caplog):
    #setup
    records = caplog.records
    has_directory = extractor.make_has_directory(os.path.isdir)
    directory_path = "./data/tests"
    #when
    test2 = has_directory(directory_path)
    #result
    assert len(records) == 1
    assert records[0].message == f"It wasn't found directory {directory_path}"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testNoSuchDirectory(self):\n\n self.assertRaises(OSError,\n parse_package,\n \"no_such_directory\")", "def test_ensure_dir_exists(self):\n pass", "def check_dir(directory: str, err_string: str) -> None:\n if not pathlib.Path(directory).is_dir():\n print('\\n' + err_string + '\\n')\n raise NotADirectoryError", "def test_error_is_thrown_if_directory_does_not_exist(fs):\n\n output_dir = 'user_specified_directory'\n assert not os.path.exists(output_dir)\n\n with pytest.raises(NotADirectoryError) as exception_info:\n verify_output_dir(output_dir)\n\n assert exception_info.value.args[0] == 'The \"user_specified_directory\" directory, which was specified by ' \\\n 'the --output-dir command-line argument, is not an existing directory. ' \\\n 'Please either create that directory or specify a different one.'", "def check_dir(dirname):\n print('Checking directory...{}'.format(dirname))\n if dirname is not None and not is_dir(dirname):\n raise FileNotFoundError('{} is not a valid directory'.format(dirname))", "def test_nonExistentDir(self):\n e = self.assertRaises(\n IOError, logfile.LogFile, self.name, \"this_dir_does_not_exist\"\n )\n self.assertEqual(e.errno, errno.ENOENT)", "def test_doesnt_have_directory(self, check_fn_false):\n\n # setup\n has_directory = extractor.make_has_directory(os.path.isdir)\n\n # when\n test2 = has_directory(\"./data/tests\")\n\n # result\n assert test2 is False", "def test_has_directory_log(self, check_fn_true, caplog):\n\n #setup\n records = caplog.records\n has_directory = extractor.make_has_directory(os.path.isdir)\n directory_path = \"./data/observed\"\n \n #when\n test1 = has_directory(directory_path)\n\n #result\n assert len(records) == 1\n assert records[0].message == f\"It was found directory {directory_path}\"", "def check_if_dir_exists():\n if not os.path.exists(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"logs\"):\n try:\n os.mkdir(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"logs\")\n logger.debug(\"Dir for logs has been created\")\n except OSError:\n logger.debug(f\"Creation of the directory {str(__CURRENT_DIRECTORY) + os.sep + '..' + os.sep + 'logs'} failed\")\n\n if not os.path.exists(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"db\"):\n try:\n os.mkdir(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"db\")\n logger.debug(\"Dir for DB has been created\")\n except OSError:\n logger.debug(f\"Creation of the directory {str(__CURRENT_DIRECTORY) + os.sep + '..' + os.sep + 'db'} failed\")", "def check_dir(dname):\n direc = os.path.dirname(dname)\n try:\n os.stat(direc)\n except:\n os.mkdir(direc)\n print \"Made directory %s....\" % dname\n return dname", "def validate(self):\n if not self.path.is_dir() or not self.path.exists():\n raise NotADirectoryError", "def check_dir_exist(scheme):\n if os.path.exists(scheme.prefix) is False:\n print(\"ERROR: Required directory '{}' is missing! Exiting!\").format(scheme.prefix)\n sys.exit(1)", "def test_scan_dir_not_found(self, dir_path):\n with self.assertRaises(FileNotFoundError):\n self.file_scanner.scan(dir_path)", "def testNotADirectory(self):\n\n self.assertRaises(OSError,\n parse_package,\n \"not_a_directory\")", "def NotADirectoryError(inst):\n return hasattr(inst, 'errno') and inst.errno == errno.ENOTDIR", "def check_dir(filedir, olddir): # Yasemin's code\r\n\tgoodname = False\r\n\twhile goodname == False:\r\n\t\tif exists(filedir + olddir):\r\n\t\t\tprint(\"Directory already exists! 
Please pick a knew directory name for old lab files:\")\r\n\t\t\tolddir = input(\"> \")\r\n\t\t\tolddir = name_check(olddir)\r\n\t\telse:\r\n\t\t\tgoodname = True\r\n\treturn olddir", "def checkDir(directory):\n ## test if directory is there\n if not os.path.exists(directory):\n os.mkdir(directory)\n sys.out = open(directory + '/' + str(time.time()) + '.log', 'w')\n print(\"Making new directory: \" + directory + \"\\n\")\n else:\n sys.out = open(directory + '/' + str(time.time()) + '.log', 'w')\n print(\"Found directory: \" + directory + \"\\n\")", "def test_log_dir(self):\n false_dir = '/tmp/any'\n self.test_config['LOG_DIR'] = false_dir\n self.write_config_to_file()\n self.log = nginx_log_generator()\n self.generate_report()\n # Check our log\n path_to_log = '{}/assets/{}'.format(self.test_dir, 'analyzer.log')\n with open(path_to_log) as f:\n log_content = f.read()\n self.assertTrue(\"Sorry, directory {} wasn't found\".format(false_dir) in log_content)", "def test_02_not_exist(self):\n with self.assertLogs(\"borg\", \"WARNING\") as logger:\n self.api.extract(self.archive, self.file_3)\n message = logger.records[0].getMessage()\n self.assertRegex(\n message,\n r\".*?file_3.*never\",\n \"Warning not logged for bad path\",\n )", "def valid_directory(self, directory):\n\n if os.path.isdir(directory):\n return directory\n else:\n msg = f\"The write directory provided by the user does not exist: {directory}\"\n logging.exception(msg)\n self.close_logger()\n raise NotADirectoryError(msg)", "def assert_is_dir_and_exists(self):\n if not self.is_dir():\n msg = \"'%s' is not a file or doesn't exists!\" % self\n raise EnvironmentError(msg)", "def test_non_existing_directory_raises_when_metavar_is_dir_for_db_export_cleaned(self):\n with contextlib.redirect_stderr(io.StringIO()) as stderr:\n with pytest.raises(SystemExit):\n parser = cli_parser.get_parser()\n parser.parse_args([\"db\", \"export-archived\", \"--output-path\", \"/non/existing/directory\"])\n error_msg = stderr.getvalue()\n\n assert error_msg == (\n \"\\nairflow db export-archived command error: The directory \"\n \"'/non/existing/directory' does not exist!, see help above.\\n\"\n )", "def checkDirectory(path,logger):\n newPath = completePath(path)\n if not os.path.exists(newPath):\n os.makedirs(newPath)\n if (logger):\n print(\"Did not found required directories. Creating them...\")\n else:\n if (logger):\n print(\"Found the required directories!\")", "def test_process_args_should_reject_non_existent_input_directory(self, arg_dict):\n self.use_source_path(arg_dict, 'sample/directory_does_not_exist/')\n self.use_resolution_val(arg_dict, 600)\n\n with pytest.raises(FileNotFoundError):\n change_resolution.process_args(arg_dict)", "def _check_directories(self, dist, component):\n path = join(self.repository, 'dists', dist, component, 'source')\n\n if not isdir(path):\n makedirs(path)", "def is_valid_directory(parser, arg):", "def test_log_filenames_file_not_found(self):\n\n filename = '/%s' % (uuid.uuid4())\n while os.path.exists(filename): # pragma: no cover\n filename = '/%s' % (uuid.uuid4())\n\n with self.assertRaises(Exception):\n self.app.log_filenames([filename])\n self.assertEqual(self.get_track_count(), 0)", "def _is_valid_log_dir(log_dir):\n return os.path.isdir(log_dir)", "def _is_valid_log_dir(log_dir):\n return os.path.isdir(log_dir)", "def checkDir(dirName=None):\r\n if not os.path.exists(dirName):\r\n os.makedirs(dirName)\r\n return 0" ]
[ "0.7097761", "0.69815487", "0.6958049", "0.69345635", "0.68600756", "0.6768691", "0.67403984", "0.67001784", "0.66480124", "0.65875924", "0.6568089", "0.6561216", "0.6556799", "0.6535799", "0.6534498", "0.6532836", "0.6514325", "0.65090376", "0.64824975", "0.6481356", "0.6477626", "0.64698684", "0.63911563", "0.6369664", "0.6364753", "0.6346357", "0.63405997", "0.63157886", "0.63157886", "0.6312728" ]
0.7614185
0
Description
    When is given a directory path that has forecast as parent folder and csv file with desired name

Expected Result
    returns dictionary with right data
def test_forecast_folder_path(self):

        #setup
        filepath = ".data/forecast/Kano-KN_-9.09_7.39.json"
        expected_result = {
            "type": "forecast",
            "city": "Kano",
            "state": "KN",
            "coordinates": ['-9.09', '7.39'],
            "forecast": {}
        }

        #result
        assert extractor.get_metadata_from_filepath(filepath) == expected_result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_isys_output(path_to_csv,directory_details):\n isys_results=open(path_to_csv).readlines()\n partial_paths_list=[]\n #below we are starting with the second row because the first row has the column\n # headings \n start=1\n for item in isys_results[start:]:\n partial_path=item.split(',')[0]\n partial_paths_list.append(partial_path)\n filing_details=[]\n for partial_path in partial_paths_list:\n temp_dict={}\n split_partial_path=partial_path.split('\\\\')\n temp_dict['cik']=split_partial_path[1]\n temp_dict['date_details']=split_partial_path[2]\n temp_dict['file_type']=split_partial_path[3].split('.')[-1]\n temp_dict['file_path']=directory_details+partial_path\n filing_details.append(temp_dict)\n return filing_details", "def test_observed_folder_path(self):\n\n #setup\n filepath = \".data/observed/Abadia-BA_-11.56_-37.52.csv\"\n expected_result = {\n \"type\": \"observed\",\n \"city\": \"Abadia\",\n \"state\": \"BA\",\n \"coordinates\": ['-11.56', '-37.52'],\n \"observed\": {}\n }\n \n #result\n assert extractor.get_metadata_from_filepath(filepath) == expected_result", "def csv_path(name):\n return \"./data/%s\" % name", "def add_path_dict(input_dict: dict, start_path: str, file_path: str):\n # Determine relative path\n relpath = os.path.relpath(file_path, start=start_path)\n\n # If only file remaining, store in dict, otherwise go 1 level deeper\n if relpath == os.path.basename(file_path):\n input_dict[os.path.splitext(relpath)[0]] = pd.read_csv(file_path,\n sep='\\t')\n else:\n parent_dir = relpath.split(os.sep)[0]\n if parent_dir not in input_dict.keys():\n input_dict[parent_dir] = {}\n add_path_dict(input_dict=input_dict[parent_dir],\n start_path=os.path.join(start_path, parent_dir),\n file_path=file_path)", "def getAllDataFromDirectory(prediction_directory, actual_directory, write_directory, cities_file, utc_offset = False):\n city_dictionary = getCities(cities_file)\n actualGetter = getActualWeather(actual_directory, city_dictionary, get_API_keys())\n #For each day and for each city, get all the data and put it into a spreadsheet.", "def test_when_file_already_exist(self):\n\n # Create a temporary directory for test files\n temp_dir = [\"test_files/observed\", \"test_files/forecast\", \"test_files/output\"]\n for dir in temp_dir:\n os.makedirs(dir, exist_ok=True)\n\n # Create the 1st csv file\n first_csv_filepath = os.path.join(temp_dir[0], \"Abadia-BA_-11.56_-37.52.csv\")\n with open(first_csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n \n # Creating the 2nd csv file in different directory\n second_csv_filepath = os.path.join(temp_dir[1], \"Abadia-BA_-11.56_-37.52.csv\")\n with open(second_csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n\n # Define the expected output JSON file path\n expected_output_filepath = os.path.join(temp_dir[2], \"BA_Abadia.json\")\n\n # Call the function under test\n extractor.csv_to_json(first_csv_filepath, temp_dir[2])\n extractor.csv_to_json(second_csv_filepath, temp_dir[2])\n\n # Verify that the output JSON file exists\n assert 
os.path.exists(expected_output_filepath)\n\n # Load the output JSON file\n with open(expected_output_filepath, \"r\") as json_file:\n json_data = json.load(json_file)\n\n # Verify the contents of the JSON file\n expected_data = {\n \"city\": \"Abadia\",\n \"state\": \"BA\",\n \"coordinates\": [\"-11.56\", \"-37.52\"],\n \"observed\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n },\n \"forecast\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n },\n }\n\n # Assertion\n assert json_data == expected_data\n\n # Clean up the temporary directory and files\n os.remove(first_csv_filepath)\n os.remove(second_csv_filepath)\n os.remove(expected_output_filepath)\n for dir in temp_dir:\n os.rmdir(dir)", "def parse_folder(self, path):\n\n for filename in os.listdir(path):\n self.parse_file(os.path.join(path, filename), filename)\n return self.country_dict, self.hre_dict, self.name_dict", "def import_to_df(\n list: str,\n path: str = \"competition_data\"\n ) -> dict:\n\n df_dict = {}\n for file in list:\n if 'csv' not in file:\n continue\n df = pd.read_csv(\"/\".join([path, file]))\n # remove extension\n name = file.split('.')[0]\n df_dict[name] = df\n \n return df_dict", "def test_first_time_reading_csv_file(self):\n\n # Create a temporary directory for test files\n temp_dir = \"test_files/observed\"\n os.makedirs(temp_dir, exist_ok=True)\n\n # Create a test CSV file\n csv_filepath = os.path.join(temp_dir, \"Abadia-BA_-11.56_-37.52.csv\")\n with open(csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n\n # Define the expected output JSON file path\n expected_output_filepath = os.path.join(temp_dir, \"BA_Abadia.json\")\n\n # Call the function under test\n extractor.csv_to_json(csv_filepath, temp_dir)\n\n # Verify that the output JSON file exists\n assert os.path.exists(expected_output_filepath)\n\n # Load the output JSON file\n with open(expected_output_filepath, \"r\") as json_file:\n json_data = json.load(json_file)\n\n # Verify the contents of the JSON file\n expected_data = {\n \"city\": \"Abadia\",\n \"state\": \"BA\",\n \"coordinates\": [\"-11.56\", \"-37.52\"],\n \"observed\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n }\n }\n assert json_data == expected_data\n\n # Clean up the temporary directory and files\n os.remove(csv_filepath)\n os.remove(expected_output_filepath)\n os.rmdir(temp_dir)", "def _get_csv_path(name):\n return os.path.join(cwd, 'output/app_info', name)", "def loadPredictions(self):\n\n dir_path = os.path.dirname(os.path.realpath(__file__))\n message = 'Select folder'\n folderDialog = QtWidgets.QFileDialog(self, message, dir_path)\n folderDialog.setFileMode(QtWidgets.QFileDialog.Directory)\n folderDialog.setOption(QtWidgets.QFileDialog.DontUseNativeDialog, True)\n fileName = [] # Returns a list of the directory\n\n # Plot the window to select the csv file\n if folderDialog.exec_():\n fileName = folderDialog.selectedFiles()\n # Debug\n #fileName = 
['/media/dimitris/TOSHIBA EXT/Image_Document_Classification/PMC-Dataset']\n print(fileName)\n if os.path.isdir(str(fileName[0])):\n self.loadFolder(str(fileName[0]))\n else:\n message = 'Only csv files'\n self.messageBox(message)\n return\n\n self.selectFigures()", "def extract_csv_for_date(config, data_date): \n \n ### TODO: test config separately \n \n # print(config.DATA_ROOT)\n # print(data_date)\n \n # Raise an exception if attribute DATA_ROOT does not exist\n if not 'DATA_ROOT' in vars(config):\n raise AttributeError(\"Attribute DATA_ROOT does not exist\")\n \n # Raise an exception if DATA_ROOT does not exist\n if not os.path.exists(config.DATA_ROOT):\n raise NotADirectoryError(\"The path \" + config.DATA_ROOT + \" not found\")\n \n # Raise an exception if attribute METER_CHANNEL_DICT does not exist\n if not 'METER_CHANNEL_DICT' in vars(config):\n raise AttributeError(\"Attribute METER_CHANNEL_DICT does not exist\")\n \n # Raise an exception if attribute METER_CHANNEL_DICT does not exist\n if not 'SAMPLE_TIME' in vars(config):\n raise AttributeError(\"Attribute METER_CHANNEL_DICT does not exist\")\n \n data_date_dt = parse(data_date)\n \n if data_date_dt > config.DATA_END_DATE:\n raise ValueError(\"data_date entered is greater than the DATA_END_DATE: \" + \n str(config.DATA_END_DATE))\n \n if data_date_dt < config.DATA_START_DATE:\n raise ValueError(\"data_date entered is less than the DATA_START_DATE: \" + \n str(config.DATA_START_DATE))\n \n # Get the year, month and and day from date entered\n data_year = data_date_dt.year\n data_month = data_date_dt.month\n data_day = data_date_dt.day\n \n # Get the corresponding path in the directory to look for the data for the day\n data_path = os.path.join(config.DATA_ROOT, str(data_year), \"{:02}\".format(data_month), \"{:02}\".format(data_day))\n # print(data_path)\n # Find the count of meters\n meter_count = len(config.METER_CHANNEL_DICT)\n\n # Dictionary to store the names of the resulting csv files\n meter_csv_names = {}\n \n # Get the down-sampling time\n sample_time = config.SAMPLE_TIME\n \n # Create a dictionary with keys are meter names and values as dataframes \n # containing the data for the day\n meter_collection = {}\n \n # for meter_name in config.METER_CHANNEL_DICT:\n # # Create an empty dataframe, the columns will be created later\n # meter_collection[meter_name] = pd.DataFrame()\n\n #print(meter_collection)\n if os.path.exists(data_path):\n # Walk through all the files in the directory for the day's data\n for dirpath, dirnames, files in os.walk(data_path, topdown=True):\n # `files` contains the names of all the files at the location\n if len(files) == 0:\n print(\"No files found for day: \" + data_path)\n continue\n for filename in files:\n # Get the netcdf files, these are files with `.nc` extension\n if filename.lower().endswith('.nc'):\n # For the particular file, find out the corresponding meter and channel \n [meter, channel] = extract_ppty(filename, config.METER_CHANNEL_DICT.keys())\n # Create an entry in the `meter_collection` dict if it does not exist yet\n if meter not in meter_collection:\n meter_collection[meter] = pd.DataFrame()\n # Form the resulting csv name from the meter name if it doesnt exist yet\n # They are of the type - meter_name@Timestamp@Duration@Frequency\n # For e.g.: PQube3@2017-11-01T080002Z@[email protected]\n #print(meter, channel)\n if meter not in meter_csv_names:\n meter_csv_names[meter] = '@'.join([meter, '@'.join(filename.split('@')[1:4])])[:-3] + '.csv'\n #print(meter_csv_names)\n # Get 
the full path of the csv\n csv_name = os.path.join(data_path, meter_csv_names[meter])\n # Only extract if not already extracted to csv\n if (not os.path.isfile(csv_name)):\n # Get the dataframe containing time and channel values\n channel_df = extract_data(dirpath, filename)\n # Give the dataframe column a name\n channel_df.columns = [channel]\n # Down-sample the data to the sampling time intended\n channel_resampled = data_resample(channel_df, sample_time)\n # If our meter dataframe is empty so far, i.e. if this is the \n # first channel being entered, then create a copy of the \n # resampled dataframe\n if meter_collection[meter].empty:\n meter_collection[meter] = channel_resampled.copy()\n ####################### \n # This `else` clause handles two cases:\n # 1. If the dataframe is not empty, then add other columns to\n # the dataframe. (the else case)\n # 2. Some days have data downloaded more than once, this means \n # that channels can occur more than once. (like 05/21/2018)\n #######################\n else:\n # If the channel already exists in the dataframe\n # then either the other file has updated data or \n # subsequent data. \n if channel in meter_collection[meter].columns:\n # Get index from total dataframe \n idx_1 = meter_collection[meter].index\n # Get index from file dataframe\n idx_2 = channel_resampled.index\n # Compare the two, if the index is contained within,\n # then **update** the channel's value for file's indices. \n if np.all(np.isin(idx_2, idx_1)):\n meter_collection[meter][channel].loc[idx_2] = channel_resampled.values.tolist()\n # If the index is not contained, append the file df to\n # the total dataframe\n else:\n meter_collection[meter] = meter_collection[meter].append(channel_resampled, sort=False)\n meter_collection[meter].sort_index(inplace=True)\n #######################\n # This data is resampled a second time to handle two cases:\n # 1. When appending a resampled dataframe to an already resampled dataframe, the last\n # index of the original dataframe and the first index of the new dataframe can have\n # the same time. Resampling the appended dataframe will eliminate the repetitions.\n # 2. If the new dataframe to be appended starts at a much later time, resampling the\n # appended dataframe will create rows of missing data (NaN) at the times with no\n # measurement values. This makes it easier to detect missing measurement values and\n # perform data imputation at a later phase.\n #######################\n meter_collection[meter] = data_resample(meter_collection[meter], sample_time)\n # If the channel does not already exist, then add the\n # file dataframe to the total df. 
\n else:\n meter_collection[meter] = meter_collection[meter].join(channel_resampled, how='outer')\n else:\n print(\"Path not found: \" + data_path)\n \n # Perform data imputation wherrever needed\n # print(meter_collection)\n meter_collection = data_impute(meter_collection)\n \n # Write the total dataframes to csv file\n for meter in meter_collection:\n # Reorganize the order of columns to match the database tables \n meter_channels = config.METER_CHANNEL_DICT[meter]\n # meter_collection[meter].reset_index(inplace=True)\n meter_collection[meter] = meter_collection[meter].reindex(columns=meter_channels[1:])\n csv_name = os.path.join(data_path, meter_csv_names[meter])\n # print(csv_name)\n # Only write csv if it does not exist yet\n if(not os.path.isfile(csv_name)):\n meter_collection[meter].to_csv(csv_name, header=False)\n\n return meter_csv_names", "def route_data(route):\n os.chdir(\"../Data/test\") #change to whatever directory your data files are stored in\n with open(\"../Sorted Data/\"+str(route)+\"_data.csv\",\"w\",newline=\"\") as result_file: #storing resulting data in csv file in different directory\n wr=csv.writer(result_file, dialect='excel') #setting up csv writer\n for file in glob.glob(\"*.csv\"): #looping through raw data files\n reader=csv.reader(open(file))\n for line in reader:\n if extract_bus_route(line[3])==route: #extract_bus_route returns the bus route from journey pattern id (col D)\n wr.writerow(line)", "def update_csv():\n return os.listdir('./data')", "def collect_data(folder):\n folder = pathlib.Path(folder)\n cases = []\n for case_folder in folder.iterdir():\n print(f'start collecting data for location {case_folder.name}')\n for tr_folder in case_folder.iterdir():\n case = calculate_values(tr_folder)\n cases.append(case)\n \n df = pd.DataFrame(cases)\n print(folder.parent.joinpath(f'{folder.stem}.csv'))\n df.to_csv(folder.parent.joinpath(f'{folder.stem}.csv'), index=False)", "def read_weatherstations(path_to_data):\n namedict = read_weatherstationnames(path_to_data)\n stations = {}\n for i in namedict:\n filename = namedict[i].replace(' ', '_') + '.csv'\n print(\"Reading\", filename)\n ws = read_station_csv(os.path.join(path_to_data, filename))\n stations[i] = ws\n return stations", "def __init__(self, root_dir):\n self.paths = glob.glob(root_dir + \"/*.csv\")\n self.target = 'Default'\n # Grouping variable names", "def convert_dataset(filename, subdirectory='data'):\n \"\"\" Single path case. \"\"\"\n if 'TRIP_ID' not in next(read_csv(filename, subdirectory)):\n return [DataPoint(timestamp=line['SAMPLE_DATE'],\n speed=line['SPEED'],\n lon=line['LON'],\n lat=line['LAT'],\n bearing=line['HEADING']) for line in read_csv(filename, subdirectory)]\n\n \"\"\" Multiple path case. \"\"\"\n paths = {}\n for line in read_csv(filename, subdirectory):\n next_point = DataPoint(timestamp=line['SAMPLE_DATE'],\n speed=line['SPEED'],\n lon=line['LON'],\n lat=line['LAT'],\n bearing=line['HEADING'])\n try:\n paths[line['TRIP_ID']].append(next_point)\n except KeyError:\n paths[line['TRIP_ID']] = [next_point]\n\n return list(paths.values())", "def get_data(self):\r\n\r\n # Find the absolute path for the root dir (04-Decision-Science)\r\n # Uses __file__ as absolute path anchor\r\n root_dir = os.path.abspath('')\r\n\r\n # Use os library for Unix vs. 
Widowns robustness\r\n xls_path = os.path.join(root_dir, 'data')\r\n\r\n file_names = [f for f in os.listdir(csv_path) if f.endswith('.xls')]\r\n\r\n def key_from_file_name(f):\r\n if f[-4:] == '.xls':\r\n return f[:-4]\r\n\r\n # Create the dictionary\r\n data = {}\r\n for f in file_names:\r\n data[key_from_file_name(f)] = pd.read_excel(os.path.join(xls_path, f))", "def preprocess_files(file_path):\n # checking your current working directory\n cur_dir = os.getcwd()\n\n # Get your current folder and sub folder event data\n data_dir = os.path.join(cur_dir, 'event_data')\n\n # Create a for loop to create a list of files and collect each\n # file_path\n file_path_list = []\n for root, dirs, files in os.walk(data_dir):\n # join the file path and roots with the subdirectories using\n # glob\n file_path_list = glob.glob(os.path.join(root, '*'))\n\n full_data_rows_list = []\n\n # for every file_path in the file path list collect records\n for f in file_path_list:\n\n # reading csv file\n with open(f, 'r', encoding='utf8', newline='') as csvfile:\n\n # creating a csv reader object\n csvreader = csv.reader(csvfile)\n next(csvreader)\n\n # extracting each data row one by one and append it\n for line in csvreader:\n full_data_rows_list.append(line)\n\n csv.register_dialect('myDialect', quoting=csv.QUOTE_ALL,\n skipinitialspace=True)\n\n # create one file with all the records\n with open(file_path, 'w', encoding='utf8',\n newline='') as f:\n writer = csv.writer(f, dialect='myDialect')\n writer.writerow(\n ['artist', 'firstName', 'gender', 'itemInSession',\n 'lastName', 'length', 'level', 'location', 'sessionId',\n 'song', 'userId'])\n for row in full_data_rows_list:\n if row[0] == '':\n continue\n writer.writerow((row[0], row[2], row[3], row[4], row[5],\n row[6], row[7], row[8], row[12], row[13],\n row[16]))", "def get_data_file():\n base_folder = os.path.dirname(__file__)\n # print(base_folder)\n return os.path.join(base_folder, 'data', 'Sacramentorealestatetransactions.csv')\n # print(filename)", "def Get_Player_Historic_Data(data_path, player_history_path): \n players = os.listdir(player_history_path) # Lists All The Player Folders in the Dir\n players_data = pd.read_csv(data_path + 'players_raw.csv')\n for ind in pbar(players_data.index): # ind in [0:693:1]\n # Get the Seasonal History\n player_path = players_data['first_name'][ind] + '_' + players_data['second_name'][ind] + '_' + str(players_data['id'][ind]) # Create player_history_path\n if player_path not in players: # If the player (read from players_raw.csv) is not within the existing directory, continue: \n json = Access_URL(url = \"https://fantasy.premierleague.com/api/element-summary/{}/\".format(str(players_data['id'][ind]))) # Feed in Player ID\n # print(json.keys())\n history_df = pd.DataFrame(json['history_past']) # Extract history\n if not history_df.empty: # If history returned\n os.makedirs(player_history_path + player_path, exist_ok = True) # Create a new path for the player \n history_df.to_csv(player_history_path + player_path + '/history.csv', encoding='utf-8', index = False) # And write his syeasonal history\n else: # However, if the player is within the existing directory\n if not os.path.isfile(player_history_path + player_path + \"/history.csv\"): # And a history file does not exist\n json = Access_URL(url = \"https://fantasy.premierleague.com/api/element-summary/{}/\".format(str(players_data['id'][ind]))) # Feed in Player ID\n history_df = pd.DataFrame(json['history_past']) # Extract history\n if not history_df.empty: # If 
history returned\n history_df.to_csv(player_history_path + player_path + '/history.csv', encoding='utf-8', index = False) # And write his seasonal history\n # Get the Gameweek History\n json = Access_URL(url = \"https://fantasy.premierleague.com/api/element-summary/{}/\".format(str(players_data['id'][ind]))) # Feed in Player ID \n history_df_gw = pd.DataFrame(json['history']) # Extract Gameweek History\n if not history_df_gw.empty: # If history returned\n if player_path not in players: # If the player (read from players_raw.csv) is not within the existing directory, continue: \n os.makedirs(player_history_path + player_path, exist_ok = True) # Create the directory, exit\n history_df_gw.to_csv(player_history_path + player_path + '/gw.csv', encoding='utf-8', index = False) # Write the CSV", "def task_lst_gen(dirr, csv_path):\n train_file_lst, val_file_lst, test_file_lst = files_from_csv(csv_path)\n\n task_dict = {}\n out_prefix = '/work/jfeins1/maestro/dataset-v3/'\n for subdirs, dirs, files in os.walk(dirr):\n for file in files:\n filepath = subdirs + os.sep + file\n\n if file in train_file_lst:\n uid = str(file).split('.')[0]\n out = out_prefix + 'train/' + uid\n task_dict[uid] = {'in': filepath, 'out': out}\n\n if file in test_file_lst:\n uid = str(file).split('.')[0]\n out = out_prefix + 'test/' + uid\n task_dict[uid] = {'in': filepath, 'out': out}\n\n if file in val_file_lst:\n uid = str(file).split('.')[0]\n out = out_prefix + 'val/' + uid\n task_dict[uid] = {'in': filepath, 'out': out}\n\n task_lst = open('/work/jfeins1/maestro/encoding_gen_task.lst', 'w')\n for uid, d in task_dict.items():\n print(d['in'], d['out'], file=task_lst)", "def read_files(path, file_name):\n\n if os.path.exists(\n r'{}\\{}_dynamic.csv'.format(path, file_name)) and os.path.exists(\n r'{}\\{}_static.csv'.format(path, file_name)) and os.path.exists(\n r'{}\\{}_ego.csv'.format(path, file_name)):\n with open(r'{}\\{}_dynamic.csv'.format(path, file_name)) as tmp_dynamic:\n dynamic_csv = pd.read_csv(tmp_dynamic)\n print('Dynamic csv file found')\n with open(r'{}\\{}_static.csv'.format(path, file_name)) as tmp_static:\n static_csv = pd.read_csv(tmp_static)\n print('Static csv file found')\n with open(r'{}\\{}_ego.csv'.format(path, file_name)) as tmp_ego:\n ego_csv = pd.read_csv(tmp_ego)\n print('Ego csv file found')\n return ego_csv, dynamic_csv, static_csv\n\n else:\n print('No available data')\n sys.exit(0)", "def readCsv(variables, path, pathCsv, estacion):\n # os.makedirs('../data/totalData/')\n dataVa = df.DataFrame()\n variables = variables\n mypath = path\n patron = re.compile(variables + '_'+estacion+'_\\d\\d\\d\\d-\\d\\d-\\d\\d' + '.*')\n for base, dirs, filess in os.walk(mypath, topdown=False):\n filess = sorted(filess)\n for value in filess:\n if patron.match(value) != None:\n tempData = df.read_csv(mypath + value)\n #tempData = completeMet(tempData)\n tempData = tempData.iloc[0:24, :]\n dataVa = concat([tempData, dataVa], axis=0)\n dataVa = dataVa.reset_index()\n dataVa = dataVa.drop(labels='index', axis=1)\n dataVa.to_csv(pathCsv + variables + '_'+ estacion +'_total.csv', encoding='utf-8', index=False)\n dataVa = df.DataFrame()", "def get_data( name=None, force_download=False, version=19, target_extension='.csv' ):\n os.makedirs(DATA_PATH, exist_ok=True)\n\n def download_data( version ):\n url = \"https://ndownloader.figshare.com/articles/14766102/versions/\" + str(version)\n target_file_name = \"14766102.zip\"\n target_file_name_path = tf.keras.utils.get_file(target_file_name, url,\n 
cache_subdir=DATA_PATH, extract = True )\n os.remove( DATA_PATH + target_file_name )\n\n if force_download:\n download_data( version = version )\n\n\n files = []\n for fname in os.listdir(DATA_PATH):\n if ( fname.endswith(target_extension) ) :\n fname = os.path.join(DATA_PATH, fname)\n files.append(fname)\n\n if len( files ) == 0 :\n download_data( version = version )\n for fname in os.listdir(DATA_PATH):\n if ( fname.endswith(target_extension) ) :\n fname = os.path.join(DATA_PATH, fname)\n files.append(fname)\n\n if name == 'all':\n return files\n\n datapath = None\n\n for fname in os.listdir(DATA_PATH):\n mystem = (Path(fname).resolve().stem)\n mystem = (Path(mystem).resolve().stem)\n mystem = (Path(mystem).resolve().stem)\n if ( name == mystem and fname.endswith(target_extension) ) :\n datapath = os.path.join(DATA_PATH, fname)\n\n if datapath is None:\n raise ValueError('File doesnt exist. Options: ' , os.listdir(DATA_PATH))\n return datapath", "def get_files(input_dir):\n file_rep = { \"tars\" : [] }\n \n files = os.listdir(input_dir)\n \n the_file, the_date = find_bootstrap(files)\n \n #add index file in file_rep\n file_rep['index'] = the_file\n file_rep['date'] = the_date\n \n pattern = \"ncep_forecast_%s_(?P<name>\\S+).tar\" % (the_date)\n \n the_re = re.compile(pattern)\n\n for the_file in files:\n matched = the_re.match(the_file)\n if matched:\n print(\"matched %s\" % (matched.group(\"name\")))\n file_rep['tars'].append(the_file)\n \n return file_rep", "def create_files_dict(csv_file_name: str):\r\n\r\n SKUs = [] # list of SKU's in the csv file\r\n with open(csv_file_name, 'r') as csv_fd:\r\n csv_reader = csv.reader(csv_fd)\r\n for line in csv_reader:\r\n for SKU in line:\r\n SKUs.append(SKU)\r\n\r\n # creating a list of file extensions [.ext, ...]\r\n file_extensions = []\r\n for SKU in SKUs:\r\n for dir_file in os.listdir():\r\n if SKU in os.path.splitext(dir_file)[0]:\r\n dir_file_ext = os.path.splitext(dir_file)[1]\r\n if dir_file_ext not in file_extensions:\r\n file_extensions.append(dir_file_ext)\r\n file_extensions.sort() # sorting by ascii for constant format view\r\n # print(\"debug:::file_extensions\", file_extensions)\r\n\r\n ext_format_dict = {} # base format for creating extension dict (to be copied for each iteration)\r\n for ext in file_extensions:\r\n ext_format_dict[ext] = ''\r\n\r\n files = {}\r\n for filename_base in SKUs:\r\n for dir_file_0 in os.listdir():\r\n current_file_extensions = ext_format_dict.copy() # reset dict values for each file\r\n if filename_base in os.path.splitext(dir_file_0)[0]:\r\n # need to take the dir_file_base and re-iterate over listdir to find all exact name filenames\r\n for dir_file_1 in os.listdir():\r\n if os.path.splitext(dir_file_0)[0] == os.path.splitext(dir_file_1)[0]:\r\n dir_file_base = os.path.splitext(dir_file_1)[0]\r\n dir_file_ext = os.path.splitext(dir_file_1)[1]\r\n if dir_file_ext in list(current_file_extensions.keys()):\r\n current_file_extensions[dir_file_ext] = 'V'\r\n files[dir_file_base] = current_file_extensions\r\n\r\n return files", "def csv_file(input_file):\n\n current_dir = os.getcwd()\n directory_name = current_dir + '\\\\' + 'data' + '\\\\'\n csv_out = directory_name + input_file\n return csv_out", "def get_csv_data(csv_path: str, img_dir: str) -> pd.DataFrame:\r\n data = pd.read_csv(csv_path)\r\n data['title'] = data['title'].apply(preprocess_titles)\r\n data['image'] = data['image'].apply(abs_path, args=(img_dir,))\r\n return data" ]
[ "0.60329616", "0.5951354", "0.5933117", "0.59185493", "0.5806007", "0.57351774", "0.5663706", "0.5627654", "0.562072", "0.55951595", "0.55628073", "0.55555654", "0.5554841", "0.5539798", "0.5537601", "0.5528273", "0.5500308", "0.5499843", "0.54943573", "0.54920584", "0.5481313", "0.5477954", "0.5464948", "0.5448699", "0.5443295", "0.543615", "0.5419603", "0.5418583", "0.53786147", "0.5378308" ]
0.6474041
0
Description
    When is given a csv_filepath and output_filepath and its the first time reading it

Expected Result
    creates a json file with right values
def test_first_time_reading_csv_file(self):

        # Create a temporary directory for test files
        temp_dir = "test_files/observed"
        os.makedirs(temp_dir, exist_ok=True)

        # Create a test CSV file
        csv_filepath = os.path.join(temp_dir, "Abadia-BA_-11.56_-37.52.csv")
        with open(csv_filepath, "w", newline="") as csv_file:
            writer = csv.writer(csv_file, delimiter=";")
            writer.writerow(["periods", "precipitation", "temperature", "max_temperature"])
            writer.writerow(["2023-01-01", "5", "25", "30"])
            writer.writerow(["2023-01-02", "10", "23", "28"])

        # Define the expected output JSON file path
        expected_output_filepath = os.path.join(temp_dir, "BA_Abadia.json")

        # Call the function under test
        extractor.csv_to_json(csv_filepath, temp_dir)

        # Verify that the output JSON file exists
        assert os.path.exists(expected_output_filepath)

        # Load the output JSON file
        with open(expected_output_filepath, "r") as json_file:
            json_data = json.load(json_file)

        # Verify the contents of the JSON file
        expected_data = {
            "city": "Abadia",
            "state": "BA",
            "coordinates": ["-11.56", "-37.52"],
            "observed": {
                "periods": ["2023-01-01", "2023-01-02"],
                "precipitation": ["5", "10"],
                "temperature": ["25", "23"],
                "max_temperature": ["30", "28"]
            }
        }
        assert json_data == expected_data

        # Clean up the temporary directory and files
        os.remove(csv_filepath)
        os.remove(expected_output_filepath)
        os.rmdir(temp_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_json_from_csv(csv_file, delimiter, cols_delimiter, keep, dic_types, infer_types, max_docs, json_file, per_line):\n\n # Get header of csv\n header_csv = get_header_csv(csv_file, cols_delimiter)\n\n # Create structure of json\n print(' [INFO] Creating json\\'s structure')\n jstruct = create_json_structure(header_csv, delimiter)\n print(jstruct)\n # Read csv line by line and create list of json\n print(' [INFO] Filling json') \n js_content = []\n with open(csv_file, 'r') as f:\n reader = csv.DictReader(f, delimiter=cols_delimiter)\n i = 0\n beg = True\n end = True\n # Prepare output file if dump in one file\n if max_docs == -1 and not per_line:\n beg = False\n end = False\n with open(json_file, 'w') as jsf:\n jsf.write('[\\n')\n for row in reader:\n if infer_types:\n row = {x: infer_type(row[x]) for x in row}\n jexample = copy.deepcopy(jstruct)\n js_content.append(create_json_example(row, header_csv, jexample, delimiter, keep, dic_types))\n\n i += 1\n # Dump json in streaming\n if (max_docs == -1) and ((i % 10000) == 0):\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)\n js_content = []\n elif (max_docs != -1) and (i % max_docs) == 0:\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)\n js_content = []\n\n # Dump last jsons\n if js_content:\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, True)\n\n print(' [INFO] Json{} successfully created and dumped'.format('s' if (max_docs != -1) else ''))\n\n return", "def test_when_file_already_exist(self):\n\n # Create a temporary directory for test files\n temp_dir = [\"test_files/observed\", \"test_files/forecast\", \"test_files/output\"]\n for dir in temp_dir:\n os.makedirs(dir, exist_ok=True)\n\n # Create the 1st csv file\n first_csv_filepath = os.path.join(temp_dir[0], \"Abadia-BA_-11.56_-37.52.csv\")\n with open(first_csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n \n # Creating the 2nd csv file in different directory\n second_csv_filepath = os.path.join(temp_dir[1], \"Abadia-BA_-11.56_-37.52.csv\")\n with open(second_csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n\n # Define the expected output JSON file path\n expected_output_filepath = os.path.join(temp_dir[2], \"BA_Abadia.json\")\n\n # Call the function under test\n extractor.csv_to_json(first_csv_filepath, temp_dir[2])\n extractor.csv_to_json(second_csv_filepath, temp_dir[2])\n\n # Verify that the output JSON file exists\n assert os.path.exists(expected_output_filepath)\n\n # Load the output JSON file\n with open(expected_output_filepath, \"r\") as json_file:\n json_data = json.load(json_file)\n\n # Verify the contents of the JSON file\n expected_data = {\n \"city\": \"Abadia\",\n \"state\": \"BA\",\n \"coordinates\": [\"-11.56\", \"-37.52\"],\n \"observed\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n },\n \"forecast\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n 
\"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n },\n }\n\n # Assertion\n assert json_data == expected_data\n\n # Clean up the temporary directory and files\n os.remove(first_csv_filepath)\n os.remove(second_csv_filepath)\n os.remove(expected_output_filepath)\n for dir in temp_dir:\n os.rmdir(dir)", "def create_dataset(input_file_path, output_file_path):\n col_index_map = {'user_id': 0, 'session_id': 1, 'timestamp': 2, 'step': 3, 'action_type': 4, 'reference': 5,\n 'platform': 6, 'city': 7, 'device': 8,\n 'current_filters': 9, 'impressions': 10, 'prices': 11}\n flat_dict = dict()\n with open(input_file_path, 'r') as csvFile:\n reader = csv.reader(csvFile)\n header = next(reader)\n col_names = [col_name for col_name in col_index_map.keys()]\n col_names.pop(0)\n index = 0\n for row in tqdm(reader):\n if len(flat_dict) > 40000:\n index += 1\n with open(output_file_path + \"_\" + str(index) + \".json\", \"w\") as file:\n json.dump(flat_dict, file)\n print(\" JSON : \", index)\n flat_dict = dict()\n col_values = [row[col_index_map[c_n]] for c_n in col_names]\n dict_for_each_row = dict(zip(col_names, col_values))\n to_list = dict_for_each_row['impressions']\n dict_for_each_row['impressions'] = to_list.split('|')\n to_list = dict_for_each_row['prices']\n dict_for_each_row['prices'] = to_list.split('|')\n user_id = row[col_index_map['user_id']]\n if user_id in flat_dict:\n flat_dict[user_id].append(dict_for_each_row)\n else:\n flat_dict[user_id] = [dict_for_each_row]\n\n print(\"Output is Saved\")", "def write_csv_file(csv_output_file, full_data):\n j = 0\n csv_file_path = make_dir(csv_output_file)\n\n # csv_file_path = os.path.join(csv_file_path, csv_output_file)\n try:\n with open(csv_file_path, 'w', newline='') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',')\n csvwriter.writerow(['tripId', 'agency_tripId', 'itinerary_nb', 'modes', 'actual_time', 'perceived_time',\n 'start_time', 'end_time', 'walk_time', 'walk_distance','transit_time', 'waiting_time',\n 'boardings', 'bus_lines_numbers', 'boarding_stop_ids', 'debarquer_stop_ids'])\n print(\"======================================\")\n print(\"= Creating CSV file from JSON files =\")\n print(\"======================================\")\n for id in full_data.keys(): # just so we can get all the ids\n data = full_data[id]\n j += 1\n\n printrp('( ' + str(j) + ' / ' + str(len(full_data) - 1) + ' )') if found_CmdPrinter else print(j)\n\n if 'error' in data:\n # if no itineraries were find (ie. there was an error), write the error id and error message\n # note : msg is the short message (eg. 
PATH_NOT_FOUND), message is the long description\n csvwriter.writerow([id] + ['error'] + [str(data['error']['id'])] +\n [str(data['error']['message'])] + [str(data['error']['msg'])])\n else:\n for itinerary_nb in range(len(data['plan']['itineraries'])):\n\n boarding = 0\n busNbs = \"\"\n boarding_stop_ids = \"\"\n debarquer_stop_ids = \"\"\n agency_trip_ids = \"\"\n modes = \"\"\n for leg in data['plan']['itineraries'][itinerary_nb]['legs']:\n modes += leg['mode'] + ';'\n if leg['mode'] == 'BUS':\n # every time a BUS step is included in the itineraries :\n # add 1 to the boarding counter\n # add the bus line number to busNbs\n # add the stop_ids to boarding_stop_ids and debarquer_stop_ids\n boarding += 1\n busNbs += leg['route'] + \";\"\n\n boarding_stop_ids += str(leg['from']['stopCode']) + ';'\n debarquer_stop_ids += str(leg['to']['stopCode']) + ';'\n agency_trip_ids += str(leg['tripId'].split(':')[1]) + ';'\n # we need to .split that line because tripId is given as agencyId:tripId\n\n\n busNbs = busNbs[:-1] # removing the trailing semi-colon\n boarding_stop_ids = boarding_stop_ids[:-1]\n debarquer_stop_ids = debarquer_stop_ids[:-1]\n agency_trip_ids = agency_trip_ids[:-1]\n modes = modes[:-1]\n startTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(data['plan']['itineraries'][itinerary_nb]['startTime']/1000))\n endTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(data['plan']['itineraries'][itinerary_nb]['endTime']/1000))\n # those are /1000 because OTP gives Epoch time in milliseconds\n\n walkTime = data['plan']['itineraries'][itinerary_nb]['walkTime']\n transitTime = data['plan']['itineraries'][itinerary_nb]['transitTime']\n waitingTime = data['plan']['itineraries'][itinerary_nb]['waitingTime']\n\n # Write all the information inside a csv file\n csvwriter.writerow([id,\n str(agency_trip_ids),\n str(itinerary_nb+1),\n str(modes),\n str(data['plan']['itineraries'][itinerary_nb]['duration']),\n str(get_perceived_time(walkTime, transitTime, waitingTime)),\n str(startTime),\n str(endTime),\n str(walkTime),\n str(data['plan']['itineraries'][itinerary_nb]['walkDistance']),\n str(transitTime),\n str(waitingTime),\n str(boarding),\n str(busNbs),\n str(boarding_stop_ids),\n str(debarquer_stop_ids)])\n except PermissionError:\n print('ERROR - Cannot write to CSV file. The file might be used by another app.')\n exit()\n except OSError:\n print(\"ERROR - Couldn't open file \" + csv_file_path + \". 
Please verify the file's permissions.\")\n print('( ' + str(j-1) + ' / ' + str(len(full_data) - 1) + ' )')", "def obs_csv2json(input_file,output_file,example_path,instrument):\r\n\r\n obs_path = Path(cfg.obs_path)\r\n \r\n with open(example_path,'r') as e:\r\n example = js.load(e)\r\n \r\n #deleting unused categories\r\n del(example['sep_forecast_submission']['forecasts'])\r\n del(example['sep_forecast_submission']['triggers'][2])\r\n del(example['sep_forecast_submission']['triggers'][1])\r\n del(example['sep_forecast_submission']['triggers'][0])\r\n del(example['sep_forecast_submission']['triggers'][0]['particle_intensity']['instrument'])\r\n del(example['sep_forecast_submission']['triggers'][0]['particle_intensity']['last_data_time'])\r\n del(example['sep_forecast_submission']['contacts'])\r\n del(example['sep_forecast_submission']['model'])\r\n del(example['sep_forecast_submission']['issue_time'])\r\n \r\n example['sep_forecast_submission']['mode'] = 'observation'\r\n\r\n #json template for observations\r\n obs_json = example\r\n\r\n fieldnames = ('energy_threshold','flux_threshold','start_time','intensity',\r\n 'peak_time','rise_time','end_time','duration','fluence>10',\r\n 'fluence>100')\r\n\r\n #extracting data from csv file\r\n with open(input_file,'r') as f:\r\n reader = csv.DictReader(f, fieldnames)\r\n out = js.dumps( [ row for row in reader ] )\r\n\r\n obs_data = js.loads(out)\r\n\r\n data={}\r\n (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['observatory']) = instrument\r\n\r\n #creating data for all energy levels forecast\r\n for j in range(1,len(obs_data)):\r\n data[j-1]=obs_data[j]\r\n\r\n #recording start and end times for all events\r\n for i in range(len(data)):\r\n data[i]['start_time'] = datetime.strptime(data[i]['start_time'],'%Y-%m-%d %H:%M:%S')\r\n data[i]['start_time'] = data[i]['start_time'].isoformat()\r\n data[i]['end_time'] = datetime.strptime(data[i]['end_time'],'%Y-%m-%d %H:%M:%S')\r\n data[i]['end_time'] = data[i]['end_time'].isoformat()\r\n data[i]['peak_time'] = datetime.strptime(data[i]['peak_time'],'%Y-%m-%d %H:%M:%S')\r\n data[i]['peak_time'] = data[i]['peak_time'].isoformat()\r\n \r\n #recording observed values for all events\r\n if i > 0:\r\n (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events']).append({})\r\n\r\n event = (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events'][i])\r\n \r\n #start and end times\r\n event['start_time']=data[i]['start_time']\r\n event['threshold'] = data[i]['flux_threshold']\r\n event['energy_min'] = float(data[i]['energy_threshold'][1:])\r\n event['energy_max'] = -1\r\n event['end_time']=data[i]['end_time']\r\n\r\n #peak values\r\n event['peak_intensity']=data[i]['intensity']\r\n event['peak_time'] = data[i]['peak_time']\r\n event['intensity_units']='pfu'\r\n \r\n #fluence values\r\n event['fluence'] = [{'energy_min' : '10','fluence_value' : 'fluence_value',\r\n 'units' : 'MeV [cm^-2]'},\r\n {'energy_min' : '100', 'fluence_value' : 'fluence_value',\r\n 'units' : 'MeV [cm^-2]'}]\r\n event['fluence'][0]['fluence']=data[i]['fluence>10']\r\n event['fluence'][1]['fluence']=data[i]['fluence>100']\r\n\r\n\r\n if float(event['peak_intensity']) >= cfg.pfu_threshold[cfg.energy_threshold.index\r\n (int(event['energy_min']))]:\r\n event['all_clear_boolean'] = 'false'\r\n\r\n else:\r\n event['all_clear_boolean'] = 'true'\r\n\r\n\r\n #building json file\r\n with open(obs_path / output_file, 'w') as s:\r\n 
js.dump(obs_json,s,indent=1)\r\n print('json file %s created' %output_file)\r\n \r\n return", "def csv_to_json(csv_file_path: str, json_file_path: str):\n fieldnames = ('last_name', 'first_name', 'second_name')\n\n # read csv file\n try:\n with open(Path(csv_file_path)) as csv_file:\n csv_reader = csv.DictReader(csv_file, fieldnames)\n csv_data = {num: row for num, row in enumerate(csv_reader, start=1)}\n except FileNotFoundError as err:\n raise CustomException() from err\n\n # generate json\n try:\n with open(Path(json_file_path), 'w') as json_file:\n json.dump(csv_data, json_file, indent=2)\n except OSError as err:\n raise CustomException() from err", "def formatJSON(csvpath, jsonfilepath):\n\n data = {}\n my_list = []\n with open(path) as file:\n csvReader = csv.DictReader(file)\n for csvRow in csvReader:\n\n data = csvRow\n my_list.append(data)\n\n \"\"\"\n\n Write retrieved data into a json file\n NOTE: json file is automatically created when code is run from terminal\n and updates each time it run again.\n \"\"\"\n\n\n with open(jsonfilepath,\"w\") as jsonfile:\n\n jsonfile.write(json.dumps(my_list,indent=4))", "def test_csv_to_json():\r\n json_dict = {\r\n \"covariates\":{ \r\n \"value\":{\r\n \"subject0\": {\r\n \"attribute0\": 3.0,\r\n \"attribute1\": 12.0\r\n },\r\n \"subject1\": {\r\n \"attribute0\": 1.2,\r\n \"attribute1\": 10.9\r\n }\r\n }\r\n },\r\n \"data\":{\r\n \"fulfilled\": True,\r\n \"value\": {\r\n \"type\": [\"float\"],\r\n \"value\": [\r\n \"attribute0\",\r\n \"attribute1\"\r\n ]\r\n }\r\n },\r\n \"lambda\":{\r\n \"fulfilled\": True,\r\n \"value\": 0\r\n }\r\n }\r\n json_string = \"[\" + json.dumps(json_dict).replace(' ', '').replace('\\n', '') + \"]\"\r\n directory = os.path.join(os.getcwd(), \"test/\")\r\n lambda_ = \"0\"\r\n data_type = [\"float\"]\r\n data_vars = [\"attribute0\", \"attribute1\"]\r\n assert csv_to_json_(directory, lambda_, data_type, data_vars).replace(' ', '').replace('\\n', '') == json_string", "def csv_to_json(csv_filename):\n csv_trimmed = csv_filename[:-3]\n json_added = csv_trimmed + 'json'\n return json_added", "def test_csv(self, input_file_path: str, answer_file_path: List[Dict]):\n with open(attach_path(answer_file_path), 'r') as answer_file:\n csv_file = open(attach_path(input_file_path))\n assert str(read_csv(csv_file)) == answer_file.read().strip()", "def main(input_filepath, output_filepath, data_type):\n a = jsonCSV(input_filepath, os.path.join(output_filepath, data_type+'.csv'))\n column_names = a.get_superset_column_names()\n a.read_write(column_names)\n\n logger = logging.getLogger(__name__)\n logger.info('transform log files into csv')", "def test_create_csv(self):\n\n # absolute path to xml file to parse\n xml_file = os.path.join(self.xmlfilepath, \"DLTINS_20210117_01of01.xml\")\n\n # absolute path to the csv file to create\n csv_file = os.path.join(self.csvfile, \"DLTINS_20210117_01of01.csv\")\n\n # Test for correct data\n self.assertEqual(create_csv(xml_file, self.csvfile), csv_file)\n\n # Test for incorrect input xml file\n self.assertEqual(create_csv(\"somerandomfile\", self.csvfile), None)\n\n # Test for incorrect path to write csv to\n self.assertEqual(create_csv(xml_file, r\"D:\\kqcA CK j \"), None)", "def create_manifest_file(csv_file, manifest_file, s3_path):\n logger.info(\"Processing CSV file %s.\", csv_file)\n\n image_count = 0\n anomalous_count = 0\n\n with open(csv_file, newline='', encoding=\"UTF-8\") as csvfile,\\\n open(manifest_file, \"w\", encoding=\"UTF-8\") as output_file:\n\n image_classifications = 
csv.reader(\n csvfile, delimiter=',', quotechar='|')\n\n # Process each row (image) in the CSV file.\n for row in image_classifications:\n # Skip empty lines.\n if not ''.join(row).strip():\n continue\n\n source_ref = str(s3_path) + row[0]\n classification = 0\n\n if row[1].lower() == 'anomaly':\n classification = 1\n anomalous_count += 1\n\n # Create the JSON line.\n json_line = {}\n json_line['source-ref'] = source_ref\n json_line['anomaly-label'] = str(classification)\n\n metadata = {}\n metadata['confidence'] = 1\n metadata['job-name'] = \"labeling-job/anomaly-classification\"\n metadata['class-name'] = row[1]\n metadata['human-annotated'] = \"yes\"\n metadata['creation-date'] = datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%S.%f')\n metadata['type'] = \"groundtruth/image-classification\"\n\n json_line['anomaly-label-metadata'] = metadata\n\n output_file.write(json.dumps(json_line))\n output_file.write('\\n')\n image_count += 1\n\n logger.info(\"Finished creating manifest file %s.\\n\"\n \"Images: %s\\nAnomalous: %s\",\n manifest_file,\n image_count,\n anomalous_count)\n return image_count, anomalous_count", "def main():\n\n # Ensure the output directory exists\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n\n process_csv()", "def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)", "async def collate_similar_data(input_csv_file_path, output_csv_file_path):\n if not input_csv_file_path or not output_csv_file_path:\n return\n with open(output_csv_file_path, 'w') as file_object:\n csv_writer = csv.writer(file_object, delimiter=',')\n csv_writer.writerow(\n ('Account ID', 'First Name', 'Created On', 'Status',\n 'Status Set On'))\n for csv_row in read_csv_file(input_csv_file_path):\n account_status = (await fetch_account_status(csv_row[0]))\n csv_writer.writerow(csv_row + (\n account_status.get('status', ''),\n datetime.datetime.strftime(\n datetime.datetime.strptime(\n account_status.get('created_on'), '%Y-%m-%d'),\n '%Y-%m-%d') if account_status.get('created_on') else ''))", "def get_data(self, csv_file):\n pass", "def ingest_file(input, fields, advanced_operators, output, delimiter=',', quotechar='\"'):\n with open(input, 'rb') as csv_file:\n reader = csv.DictReader(csv_file)\n\n with open(output, 'a') as write_csvfile:\n fieldnames = ['acronym', 'title', 'projectUrl',\n 'foundProjectUrl1', 'foundProjectUrl2',\n 'foundProjectUrl3', 'foundProjectUrl4',\n 'foundProjectUrl5', 'foundProjectUrl6',\n 'foundProjectUrl7', 'foundProjectUrl8',\n 'foundProjectUrl9', 'foundProjectUrl10']\n\n writer = csv.DictWriter(write_csvfile, fieldnames=fieldnames)\n writer.writeheader() # this method only available at python 2.7\n\n search_engine = SearchWeb()\n\n # iterate reader\n for row in reader:\n query_string = str(concatenate(row, fields))\n\n response = search_engine.search(query_string, advanced_operators)\n\n projectsUrl = []\n results_size = len(response)\n\n # TODO print with logger\n print \"INFO: RESULT SIZE - %s\" % results_size\n\n for i in range(10):\n if i < results_size:\n projectsUrl.append(response[i]['Url'])\n else:\n projectsUrl.append('')\n\n # TODO print with logger\n print \"INFO: FIRST RESULT - %s\" % projectsUrl[0]\n writer.writerow(dict(acronym=row['acronym'], title=row['title'], projectUrl=row['projectUrl'],\n foundProjectUrl1=projectsUrl[0], foundProjectUrl2=projectsUrl[1],\n 
foundProjectUrl3=projectsUrl[2], foundProjectUrl4=projectsUrl[3],\n foundProjectUrl5=projectsUrl[4], foundProjectUrl6=projectsUrl[5],\n foundProjectUrl7=projectsUrl[6], foundProjectUrl8=projectsUrl[7],\n foundProjectUrl9=projectsUrl[8], foundProjectUrl10=projectsUrl[9]))", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('Downloading data set from DC Open data')\n\n with open(input_filepath, 'r') as f:\n parking_violations = json.load(f)\n\n for fullname, csv in parking_violations.items():\n download_file = csv + '.csv'\n local_filename = '_'.join(name.lower() for name in fullname.split() ) + '.csv'\n local_filename = os.path.join(output_filepath, local_filename)\n if not os.path.isfile(local_filename):\n time.sleep(5)\n r = requests.get(download_file)\n if not b'\"status\":\"Processing\",\"generating\":{}' in r.content:\n with open(local_filename, 'wb') as f:\n f.write(r.content)\n logger.info(local_filename)\n else:\n logger.warning('Cannot download {0}'.format(local_filename))", "def loadCSV(input_file):", "def main(input_filepath, output_filepath):\n productsDict = dataToDict(input_filepath)\n productsList = dictToCSV(productsDict)\n toCSV(productsList, output_filepath)\n\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def set_input_csv(self):\n if len(self[\"input_csv\"]) > 1:\n raise Exception(\"You must only specify *one* unified CSV file!\")\n self.csv_path = self[\"input_csv\"][0]\n print(\"Using input file\", self.csv_path)", "def test_parse(self, tmpdir):\n json_file = tmpdir.join(\"f.json\")\n obj = {\"ds\": [{\"file\": \"data.nc\", \"size\": 0, \"mtime\": 0, \"sha256\": 0}]}\n with open(str(json_file), \"w\") as f:\n json.dump(obj, f)\n\n csv_file = tmpdir.join(\"f.csv\")\n csv_file.write(\"\\n\".join([\n \",\".join(HEADER_ROW),\n \"ds,1,url,title,yes,no,{}\".format(str(json_file))\n ]))\n\n expected = {\n \"ds\": {\n \"generate_aggregation\": True,\n \"include_in_wms\": False,\n \"tech_note_title\": \"title\",\n \"tech_note_url\": \"url\",\n \"files\": [\n {\"path\": \"data.nc\", \"size\": 0, \"mtime\": 0, \"sha256\": 0}\n ]\n }\n }\n\n s = StringIO()\n sys.stdout = s\n parse_file(str(csv_file))\n sys.stdout = sys.__stdout__\n\n output_json = s.getvalue()\n try:\n parsed = json.loads(output_json)\n except ValueError:\n assert False, \"parse_file() produced invalid JSON\"\n\n assert parsed == expected", "def init_csv(input_path, config_file, quiet):\n\n if not config_file:\n config_file = getConfigPath(input_path)\n\n if not os.path.exists(config_file) or quiet:\n configHandler(config_file).resetConfig()\n click.secho('\\n{} didn\\'t exist and has been created'.format(\n config_file), fg='green')\n\n csv_file = getCsvPath(input_path)\n if not os.path.exists(csv_file) or quiet:\n confirm_overwrite = True\n else:\n confirm_overwrite = click.confirm(\n '{} already exists. 
Do you want to overwrite it?'.format(csv_file))\n\n if confirm_overwrite:\n if not os.path.exists(config_file):\n configHandler(config_file).resetConfig()\n csvHandler(csv_file).resetCSV(config_file=config_file)\n click.secho('{} created'.format(csv_file), fg='green')", "def initCSV(self, makeFile, overWrite):\n self.initialized = True\n\n os.chdir(os.path.dirname(os.path.abspath(__file__)))\n\n if os.path.exists(str(self.fileName)):\n\n f = open(str(self.fileName), \"r\")\n\n if not f.read():\n f.close()\n\n f = open(str(self.fileName), \"w\")\n outString = \"\"\n for varName in self.variableDescriptions:\n outString += varName\n outString += \",\"\n\n f.write(outString[0:-1])\n\n f.write('\\n')\n else:\n if overWrite == True:\n f.close()\n\n f = open(str(self.fileName), \"w\")\n outString = \"\"\n for varName in self.variableDescriptions:\n outString += varName\n outString += \",\"\n\n f.write(outString[0:-1])\n\n f.write('\\n')\n if overWrite == False:\n raise OSError(\"csv file is not empty!\")\n\n else:\n if makeFile == True:\n f = open(str(self.fileName), \"w\")\n \n f.close()\n else:\n raise OSError(\"csv file not found!\")", "def file_setup(outfile):\n\n extant_objids = []\n\n if os.path.exists(outfile):\n print('This file exists.')\n try:\n extant_objids = np.array(pd.read_csv(outfile)['objid']).tolist()\n except:\n print('And nonstandard!')\n # Raise an exception?\n return False\n else:\n # Initialize the file with a header\n with open(outfile, 'wb') as csvfile:\n cols = ['objid', 'flat_counts', 'mcat_bg', 'bg_counts',\n 'flux_bgsub_err', 'cps_mcatbgsub', 'counts',\n 'mag_mcatbgsub', 'cps_err', 'mag_bgsub', 'cps_bgsub',\n 'detys', 'flux_bgsub', 'flux_err', 'mag_err_1',\n 'cps_bgsub_err', 't1_data', 'bg', 'responses', 't_mean',\n 'cps_mcatbgsub_err', 'mag_bgsub_err_1', 'mag_err_2',\n 't0_data', 'racent', 'deccent', 'mag', 'exptime',\n 'bg_flat_counts', 'detxs', 't0', 't1',\n 'mag_mcatbgsub_err_2', 'flux', 'mag_mcatbgsub_err_1',\n 'flags', 'mag_bgsub_err_2', 'detrad', 'cps',\n 'flux_mcatbgsub_err', 'flux_mcatbgsub', 'mcat_expt', 'ra',\n 'dec', 'aper4', 'aper4_err', 'mcat_bg',\n 'aper7', 'aper7_err']\n\n spreadsheet = csv.writer(csvfile, delimiter=',', quotechar='|',\n quoting=csv.QUOTE_MINIMAL)\n spreadsheet.writerow(cols)\n\n return extant_objids", "def init_csv_file(csv_path):\n with open(csv_path, 'w', newline='') as csv_file:\n writer = csv.writer(csv_file)\n header = ['file_name', 'chart_in_file',\n 'year', 'month', 'row_no', 'bird_species']\n header += list(range(1, 32))\n writer.writerow(header)", "def __openAndInitCSVFile(self, modelInfo):\n # Get the base path and figure out the path of the report file.\n basePath = self.__outputDirAbsPath\n\n # Form the name of the output csv file that will contain all the results\n reportCSVName = \"%s_Report.csv\" % (self.__outputLabel,)\n reportCSVPath = self.__reportCSVPath = os.path.join(basePath, reportCSVName)\n\n # If a report CSV file already exists, back it up\n backupCSVPath = None\n if os.path.exists(reportCSVPath):\n backupCSVPath = self.__backupCSVPath = _backupFile(reportCSVPath)\n\n\n # Open report file\n if self.__replaceReport:\n mode = \"w\"\n else:\n mode = \"a\"\n csv = self.__csvFileObj = open(reportCSVPath, mode)\n\n # If we are appending, add some blank line separators\n if not self.__replaceReport and backupCSVPath:\n print >> csv\n print >> csv\n\n # Print the column names\n print >> csv, \"jobID, \",\n print >> csv, \"modelID, \",\n print >> csv, \"status, \" ,\n print >> csv, \"completionReason, \",\n print >> 
csv, \"startTime, \",\n print >> csv, \"endTime, \",\n print >> csv, \"runtime(s), \" ,\n print >> csv, \"expDesc, \",\n print >> csv, \"numRecords, \",\n\n for key in self.__sortedVariableNames:\n print >> csv, \"%s, \" % key,\n for key in self.__sortedMetricsKeys:\n print >> csv, \"%s, \" % key,\n print >> csv", "def convert_to_json(dict_to_convert, csv_file):\n json_file = csv_to_json(csv_file)\n\n with open(json_file, 'w') as file:\n json.dump(dict_to_convert, file)\n\n logging.info('JSON file written with heart rate metrics')\n return 0", "def test_json_file(self):\n #response = os.system(\"python3 client.py -f filename.csv\")\n response = client.result(False, 'json', 'unittest',file = 'test_file.csv')\n response = json.loads(response)\n first_name = response['person'][0]['first_name']\n self.assertEqual(first_name,'John','Should print John')\n length = len(response['person'])\n for count in range(0,length):\n self.assertNotIn('nationality',response['person'][count], 'Nationality should not be present')" ]
[ "0.711705", "0.6895903", "0.6818029", "0.6596224", "0.6538705", "0.6498996", "0.647413", "0.64032245", "0.632765", "0.63111657", "0.6258295", "0.62576884", "0.62437224", "0.6213409", "0.6196973", "0.6165735", "0.6092725", "0.6078405", "0.607367", "0.60391676", "0.6012014", "0.600576", "0.59839934", "0.59822106", "0.59682006", "0.59545165", "0.59063864", "0.5858894", "0.5857819", "0.58520776" ]
0.7492845
0
Description: When given a csv_filepath and an output_filepath and the output file already exists. Expected Result: concatenate the old JSON file with the values found in the 2nd reading.
def test_when_file_already_exist(self):
    # Create a temporary directory for test files
    temp_dir = ["test_files/observed", "test_files/forecast", "test_files/output"]
    for dir in temp_dir:
        os.makedirs(dir, exist_ok=True)

    # Create the 1st csv file
    first_csv_filepath = os.path.join(temp_dir[0], "Abadia-BA_-11.56_-37.52.csv")
    with open(first_csv_filepath, "w", newline="") as csv_file:
        writer = csv.writer(csv_file, delimiter=";")
        writer.writerow(["periods", "precipitation", "temperature", "max_temperature"])
        writer.writerow(["2023-01-01", "5", "25", "30"])
        writer.writerow(["2023-01-02", "10", "23", "28"])

    # Creating the 2nd csv file in different directory
    second_csv_filepath = os.path.join(temp_dir[1], "Abadia-BA_-11.56_-37.52.csv")
    with open(second_csv_filepath, "w", newline="") as csv_file:
        writer = csv.writer(csv_file, delimiter=";")
        writer.writerow(["periods", "precipitation", "temperature", "max_temperature"])
        writer.writerow(["2023-01-01", "5", "25", "30"])
        writer.writerow(["2023-01-02", "10", "23", "28"])

    # Define the expected output JSON file path
    expected_output_filepath = os.path.join(temp_dir[2], "BA_Abadia.json")

    # Call the function under test
    extractor.csv_to_json(first_csv_filepath, temp_dir[2])
    extractor.csv_to_json(second_csv_filepath, temp_dir[2])

    # Verify that the output JSON file exists
    assert os.path.exists(expected_output_filepath)

    # Load the output JSON file
    with open(expected_output_filepath, "r") as json_file:
        json_data = json.load(json_file)

    # Verify the contents of the JSON file
    expected_data = {
        "city": "Abadia",
        "state": "BA",
        "coordinates": ["-11.56", "-37.52"],
        "observed": {
            "periods": ["2023-01-01", "2023-01-02"],
            "precipitation": ["5", "10"],
            "temperature": ["25", "23"],
            "max_temperature": ["30", "28"]
        },
        "forecast": {
            "periods": ["2023-01-01", "2023-01-02"],
            "precipitation": ["5", "10"],
            "temperature": ["25", "23"],
            "max_temperature": ["30", "28"]
        },
    }

    # Assertion
    assert json_data == expected_data

    # Clean up the temporary directory and files
    os.remove(first_csv_filepath)
    os.remove(second_csv_filepath)
    os.remove(expected_output_filepath)
    for dir in temp_dir:
        os.rmdir(dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def collate_similar_data(input_csv_file_path, output_csv_file_path):\n if not input_csv_file_path or not output_csv_file_path:\n return\n with open(output_csv_file_path, 'w') as file_object:\n csv_writer = csv.writer(file_object, delimiter=',')\n csv_writer.writerow(\n ('Account ID', 'First Name', 'Created On', 'Status',\n 'Status Set On'))\n for csv_row in read_csv_file(input_csv_file_path):\n account_status = (await fetch_account_status(csv_row[0]))\n csv_writer.writerow(csv_row + (\n account_status.get('status', ''),\n datetime.datetime.strftime(\n datetime.datetime.strptime(\n account_status.get('created_on'), '%Y-%m-%d'),\n '%Y-%m-%d') if account_status.get('created_on') else ''))", "def write_csv_file(csv_output_file, full_data):\n j = 0\n csv_file_path = make_dir(csv_output_file)\n\n # csv_file_path = os.path.join(csv_file_path, csv_output_file)\n try:\n with open(csv_file_path, 'w', newline='') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',')\n csvwriter.writerow(['tripId', 'agency_tripId', 'itinerary_nb', 'modes', 'actual_time', 'perceived_time',\n 'start_time', 'end_time', 'walk_time', 'walk_distance','transit_time', 'waiting_time',\n 'boardings', 'bus_lines_numbers', 'boarding_stop_ids', 'debarquer_stop_ids'])\n print(\"======================================\")\n print(\"= Creating CSV file from JSON files =\")\n print(\"======================================\")\n for id in full_data.keys(): # just so we can get all the ids\n data = full_data[id]\n j += 1\n\n printrp('( ' + str(j) + ' / ' + str(len(full_data) - 1) + ' )') if found_CmdPrinter else print(j)\n\n if 'error' in data:\n # if no itineraries were find (ie. there was an error), write the error id and error message\n # note : msg is the short message (eg. PATH_NOT_FOUND), message is the long description\n csvwriter.writerow([id] + ['error'] + [str(data['error']['id'])] +\n [str(data['error']['message'])] + [str(data['error']['msg'])])\n else:\n for itinerary_nb in range(len(data['plan']['itineraries'])):\n\n boarding = 0\n busNbs = \"\"\n boarding_stop_ids = \"\"\n debarquer_stop_ids = \"\"\n agency_trip_ids = \"\"\n modes = \"\"\n for leg in data['plan']['itineraries'][itinerary_nb]['legs']:\n modes += leg['mode'] + ';'\n if leg['mode'] == 'BUS':\n # every time a BUS step is included in the itineraries :\n # add 1 to the boarding counter\n # add the bus line number to busNbs\n # add the stop_ids to boarding_stop_ids and debarquer_stop_ids\n boarding += 1\n busNbs += leg['route'] + \";\"\n\n boarding_stop_ids += str(leg['from']['stopCode']) + ';'\n debarquer_stop_ids += str(leg['to']['stopCode']) + ';'\n agency_trip_ids += str(leg['tripId'].split(':')[1]) + ';'\n # we need to .split that line because tripId is given as agencyId:tripId\n\n\n busNbs = busNbs[:-1] # removing the trailing semi-colon\n boarding_stop_ids = boarding_stop_ids[:-1]\n debarquer_stop_ids = debarquer_stop_ids[:-1]\n agency_trip_ids = agency_trip_ids[:-1]\n modes = modes[:-1]\n startTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(data['plan']['itineraries'][itinerary_nb]['startTime']/1000))\n endTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(data['plan']['itineraries'][itinerary_nb]['endTime']/1000))\n # those are /1000 because OTP gives Epoch time in milliseconds\n\n walkTime = data['plan']['itineraries'][itinerary_nb]['walkTime']\n transitTime = data['plan']['itineraries'][itinerary_nb]['transitTime']\n waitingTime = data['plan']['itineraries'][itinerary_nb]['waitingTime']\n\n # Write all the information 
inside a csv file\n csvwriter.writerow([id,\n str(agency_trip_ids),\n str(itinerary_nb+1),\n str(modes),\n str(data['plan']['itineraries'][itinerary_nb]['duration']),\n str(get_perceived_time(walkTime, transitTime, waitingTime)),\n str(startTime),\n str(endTime),\n str(walkTime),\n str(data['plan']['itineraries'][itinerary_nb]['walkDistance']),\n str(transitTime),\n str(waitingTime),\n str(boarding),\n str(busNbs),\n str(boarding_stop_ids),\n str(debarquer_stop_ids)])\n except PermissionError:\n print('ERROR - Cannot write to CSV file. The file might be used by another app.')\n exit()\n except OSError:\n print(\"ERROR - Couldn't open file \" + csv_file_path + \". Please verify the file's permissions.\")\n print('( ' + str(j-1) + ' / ' + str(len(full_data) - 1) + ' )')", "def get_concatenated_csv_data(concatenated_filepath, concatenated_filename, device_id, output_create_files_filepath, output_create_files_filename):\n\n # Create the full file name of the concatenated filename.\n concatenated_file = concatenated_filepath + \"/\" + concatenated_filename + \"_concatenated.csv\"\n print(\"Looking for concatenated file name: \", concatenated_file)\n\n # Test if the concatenated file exists and if it does, return it.\n if os.path.isfile(concatenated_file):\n print(\"Concatenated file exists: \", concatenated_file)\n return concatenated_file\n\n # If it does not exist, test if the individual files exist.\n elif not os.path.isfile(concatenated_file):\n print(\"Concatenated file does not exist. Create file: \", concatenated_file)\n file_list = get_data_from_files(concatenated_filepath, concatenated_filename)\n # print(\"File list:\", file_list)\n\n # If the individual files exist, create the concatenated file.\n if len(file_list) > 0:\n print(\"Individual csv files exist. 
Creating the concatenated file.\")\n concatenated_file = create_concatenated_csvfile(concatenated_filepath, concatenated_filename)\n return concatenated_file\n\n # If the individual files do not exist, get the data from the database, create the files then concatenate them.\n else:\n database_query = \"select * from ship_data_gpggagpsfix where device_id=\" + int(\n device_id) + \" order by date_time;\"\n # print(database_query)\n password = input()\n\n db_connection = MySQLdb.connect(host='localhost', user='ace', passwd=password, db='ace2016', port=3306);\n\n track_df = get_data_from_database(database_query, db_connection)\n track_df = string_to_datetime(track_df)\n\n # Output the data into daily files (as they do not already exist).\n output_daily_files(track_df, output_create_files_filepath, output_create_files_filename)\n\n concatenated_file = create_concatenated_csvfile(concatenated_filepath, concatenated_filename)\n return concatenated_file", "def combine(self, input_file, output_file):\n \n csvOutput = self.csvData.readCsv(input_file)\n \n # Extract csv column names\n columnsNmes = csvOutput[0]\n \n response = []\n \n # Remove first row from csv output (columns names)\n iterCsvOutput = iter(csvOutput)\n next(iterCsvOutput)\n # Get all api data with one call\n apiResponse = self.apiData.getContent(self.apiUrl)\n\n # Iterate over Csv lines\n for item in iterCsvOutput: \n\n # For each CSV line find corresponding account in Api response\n for apiItem in apiResponse['results']:\n\n # I Api response match Csv row combine data\n if str(apiItem['account_id']) == item[0]: \n # Add response form Api\n item.insert(4, apiItem['status'])\n item.insert(5, apiItem['created_on'])\n response.append(item)\n \n # Add row with new column names\n columnsNmes.insert(4, 'Status')\n columnsNmes.insert(5, 'Status Set On')\n response.insert(0, columnsNmes)\n\n # Generate csv output file\n self.csvData.writeCsv(response, output_file)\n \n return 'Given output file has been generated'", "def merge_csv_initial(output_filename, path):\n\n prefix = ['ParticipantID',\n 'igtb.datatime',\n 'igtb.timezone']\n\n names = ['irb',\n 'itp',\n 'ocb',\n 'inter.deviance',\n 'org.deviance',\n 'shipley.abs',\n 'shipley.vocab',\n 'neuroticism',\n 'conscientiousness',\n 'extraversion',\n 'agreeableness',\n 'openness',\n 'pos.affect',\n 'neg.affect',\n 'stai.trait',\n 'audit',\n 'gats.quantity',\n 'ipaq',\n 'psqi',\n 'gats.status']\n\n\n \n\n #b = np.loadtxt(path + names[0] + '.csv', delimiter=\",\", skiprows=1, usecols=(0, 1, 2), dtype=object)\n #a = np.array(b, dtype=object)\n\n for i,n in enumerate(names):\n file = path + n + '.csv'\n if(i==0):\n df = pd.read_csv(file, sep=',', index_col=0,usecols=[0,1,2,3]) \n df_all = df\n else:\n df = pd.read_csv(file, sep=',', index_col=0,usecols=[0,3]) \n df_all=pd.concat([df_all,df],axis=1)\n \n df_all=df_all.reset_index() \n a = df_all.as_matrix()\n\n # column_format = '%20s %10s %10s %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f'\n # column_format = '%20s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s'\n column_format = '%20s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s'\n names_string = ','.join(prefix + names)\n\n print(a.shape)\n\n np.savetxt(output_filename, a, delimiter=\",\", fmt=column_format, comments='', header=names_string)\n\n return output_filename", "def create_json_from_csv(csv_file, delimiter, cols_delimiter, 
keep, dic_types, infer_types, max_docs, json_file, per_line):\n\n # Get header of csv\n header_csv = get_header_csv(csv_file, cols_delimiter)\n\n # Create structure of json\n print(' [INFO] Creating json\\'s structure')\n jstruct = create_json_structure(header_csv, delimiter)\n print(jstruct)\n # Read csv line by line and create list of json\n print(' [INFO] Filling json') \n js_content = []\n with open(csv_file, 'r') as f:\n reader = csv.DictReader(f, delimiter=cols_delimiter)\n i = 0\n beg = True\n end = True\n # Prepare output file if dump in one file\n if max_docs == -1 and not per_line:\n beg = False\n end = False\n with open(json_file, 'w') as jsf:\n jsf.write('[\\n')\n for row in reader:\n if infer_types:\n row = {x: infer_type(row[x]) for x in row}\n jexample = copy.deepcopy(jstruct)\n js_content.append(create_json_example(row, header_csv, jexample, delimiter, keep, dic_types))\n\n i += 1\n # Dump json in streaming\n if (max_docs == -1) and ((i % 10000) == 0):\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)\n js_content = []\n elif (max_docs != -1) and (i % max_docs) == 0:\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)\n js_content = []\n\n # Dump last jsons\n if js_content:\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, True)\n\n print(' [INFO] Json{} successfully created and dumped'.format('s' if (max_docs != -1) else ''))\n\n return", "def create_dataset(input_file_path, output_file_path):\n col_index_map = {'user_id': 0, 'session_id': 1, 'timestamp': 2, 'step': 3, 'action_type': 4, 'reference': 5,\n 'platform': 6, 'city': 7, 'device': 8,\n 'current_filters': 9, 'impressions': 10, 'prices': 11}\n flat_dict = dict()\n with open(input_file_path, 'r') as csvFile:\n reader = csv.reader(csvFile)\n header = next(reader)\n col_names = [col_name for col_name in col_index_map.keys()]\n col_names.pop(0)\n index = 0\n for row in tqdm(reader):\n if len(flat_dict) > 40000:\n index += 1\n with open(output_file_path + \"_\" + str(index) + \".json\", \"w\") as file:\n json.dump(flat_dict, file)\n print(\" JSON : \", index)\n flat_dict = dict()\n col_values = [row[col_index_map[c_n]] for c_n in col_names]\n dict_for_each_row = dict(zip(col_names, col_values))\n to_list = dict_for_each_row['impressions']\n dict_for_each_row['impressions'] = to_list.split('|')\n to_list = dict_for_each_row['prices']\n dict_for_each_row['prices'] = to_list.split('|')\n user_id = row[col_index_map['user_id']]\n if user_id in flat_dict:\n flat_dict[user_id].append(dict_for_each_row)\n else:\n flat_dict[user_id] = [dict_for_each_row]\n\n print(\"Output is Saved\")", "def merge_csv_daily(output_filename, path):\n\n # import csv files from folder\n allFiles = glob.glob(path + \"*.csv\")\n\n with open(output_filename, 'wb+') as outfile:\n for i, fname in enumerate(allFiles):\n with open(fname, 'rb') as infile:\n if i != 0:\n infile.readline() # Throw away header on all but first file\n # Block copy rest of file from input to output without parsing\n shutil.copyfileobj(infile, outfile)\n # print(fname + \" has been imported.\")\n\n # adding MissingObs column back:\n df = pd.read_csv(output_filename, header=0, sep=',', index_col=[0,1], parse_dates=False)\n df.insert(loc=3, column='MissingObs', value=np.zeros((df.shape[0], )))\n df.to_csv(output_filename, sep=',')\n\n return output_filename", "def process_files_json():\n # chdir into beep root\n pwd = os.getcwd()\n os.chdir(os.environ.get(\"BEEP_ROOT\", \"/\"))\n\n meta_list = list(filter(lambda 
x: '_Metadata.csv' in x, os.listdir(SRC_DIR)))\n file_list = list(filter(lambda x: '.csv' in x if x not in meta_list else None, os.listdir(SRC_DIR)))\n all_list = list(filter(lambda x: '.csv' in x, os.listdir(SRC_DIR)))\n\n all_list = sorted(all_list)\n dumpfn(all_list, \"all_files.json\")\n\n [file_id, mapdf] = init_map(PROJECT_NAME, DEST_DIR)\n\n new_file_index = file_id\n\n for filename in tqdm(sorted(file_list)):\n # If the file has already been renamed another entry should not be made\n if mapdf['filename'].str.contains(filename).sum() > 0:\n continue\n old_file = os.path.join(SRC_DIR, filename)\n new_path = os.path.join(DEST_DIR, PROJECT_NAME)\n shutil.copy(old_file, new_path) # copy main data file\n shutil.copy(old_file.replace(\".csv\", '_Metadata.csv'), new_path) # copy meta data file\n\n if PROJECT_NAME == 'FastCharge':\n [date, channel_no, strname, protocol] = get_parameters_fastcharge(filename, SRC_DIR)\n elif PROJECT_NAME == 'ClosedLoopOED':\n [date, channel_no, strname, protocol] = get_parameters_oed(filename, SRC_DIR)\n else:\n raise ValueError(\"Unsupported PROJECT_NAME: {}\".format(PROJECT_NAME))\n\n df_dup = mapdf.set_index(['protocol', 'date'])\n if (protocol, date) in df_dup.index:\n row = mapdf[(mapdf['protocol'] == protocol) & (mapdf['date'] == date)]\n file_id = row['fid'].iloc[0]\n protocol = row['protocol'].iloc[0]\n date = row['date'].iloc[0]\n strname = row['strname'].iloc[0]\n else:\n file_id = new_file_index\n new_file_index = new_file_index + 1\n\n new_name = \"{}_{}_{}\".format(PROJECT_NAME, f'{file_id:06}', channel_no)\n new_file = os.path.join(DEST_DIR, PROJECT_NAME, \"{}.csv\".format(new_name))\n\n new_row = pd.DataFrame([[file_id, protocol, channel_no, date, strname,\n os.path.abspath(old_file),\n os.path.abspath(new_file)]],\n columns=METADATA_COLUMN_NAMES)\n mapdf = mapdf.append(new_row)\n\n os.rename(os.path.join(DEST_DIR, PROJECT_NAME, filename), new_file)\n os.rename(os.path.join(DEST_DIR, PROJECT_NAME, filename).replace(\".csv\", \"_Metadata.csv\"),\n new_file.replace(\".csv\", \"_Metadata.csv\"))\n\n mapdf.to_csv(os.path.join(DEST_DIR, PROJECT_NAME, PROJECT_NAME + \"map.csv\"), index=False)\n mapdf = mapdf.reset_index(drop=True)\n os.chdir(pwd)\n return json.dumps(mapdf.to_dict(\"list\"))", "def test_first_time_reading_csv_file(self):\n\n # Create a temporary directory for test files\n temp_dir = \"test_files/observed\"\n os.makedirs(temp_dir, exist_ok=True)\n\n # Create a test CSV file\n csv_filepath = os.path.join(temp_dir, \"Abadia-BA_-11.56_-37.52.csv\")\n with open(csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n\n # Define the expected output JSON file path\n expected_output_filepath = os.path.join(temp_dir, \"BA_Abadia.json\")\n\n # Call the function under test\n extractor.csv_to_json(csv_filepath, temp_dir)\n\n # Verify that the output JSON file exists\n assert os.path.exists(expected_output_filepath)\n\n # Load the output JSON file\n with open(expected_output_filepath, \"r\") as json_file:\n json_data = json.load(json_file)\n\n # Verify the contents of the JSON file\n expected_data = {\n \"city\": \"Abadia\",\n \"state\": \"BA\",\n \"coordinates\": [\"-11.56\", \"-37.52\"],\n \"observed\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n 
\"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n }\n }\n assert json_data == expected_data\n\n # Clean up the temporary directory and files\n os.remove(csv_filepath)\n os.remove(expected_output_filepath)\n os.rmdir(temp_dir)", "def seed_from_csv_diff(original_file_path, new_file_path, model, **kwargs):\n\n original_diff_set = set()\n new_diff_set = set()\n new_file = open(new_file_path, 'r')\n headers = new_file.readline().replace('\\n', '').split(',')\n new_reader = model.update_set_filter(csv.reader(new_file), headers)\n\n original_file = open(original_file_path, 'r')\n original_reader = csv.reader(original_file)\n next(original_reader, None)\n logger.debug(\" * Beginning CSV diff process.\")\n\n for row in new_reader:\n new_diff_set.add(json.dumps(row))\n\n for row in original_reader:\n original_diff_set.add(json.dumps(row))\n\n diff = new_diff_set - original_diff_set\n temp_file_path = os.path.join(settings.MEDIA_TEMP_ROOT, str(\n 'set_diff' + str(random.randint(1, 10000000))) + '.mock' if settings.TESTING else '.csv')\n with open(temp_file_path, 'w') as temp_file:\n writer = csv.writer(temp_file, delimiter=',')\n writer.writerow(headers)\n for row in diff:\n writer.writerow(json.loads(row))\n\n diff_gen = from_csv_file_to_gen(temp_file_path, kwargs['update'])\n logger.debug(\" * Csv diff completed, beginning batch upsert.\")\n batch_upsert_from_gen(model, diff_gen, settings.BATCH_SIZE, **kwargs)\n if os.path.isfile(temp_file_path):\n os.remove(temp_file_path)\n if 'callback' in kwargs and kwargs['callback']:\n kwargs['callback']()", "def concat_vsource_sink_csv(csv_fn1,csv_fn2,merged_source_sink_in,\n csv_type,csv_merged,freq='infer',how='left'):\n # merged_source_sink_in: the merged source_sink.in or source_sink.yaml file \n # where the data sources are from csv_fn1, csv_fn2. \n if merged_source_sink_in.endswith('yaml'):\n df_sources,df_sinks = read_source_sink_yaml(merged_source_sink_in)\n elif merged_source_sink_in.endswith('in'):\n df_sources,df_sinks = read_source_sink_in(merged_source_sink_in)\n else:\n raise NotImplementedError(\n 'merged_source_sink_in can either be .yaml or .in file')\n if csv_type == 'sources':\n sites = df_sources.index\n elif csv_type == 'sink':\n sites = df_sinks.index\n else:\n raise NotImplementedError('csv_type can either be sources or sinks')\n th1 = read_source_sink_csv(csv_fn1)\n th2 = read_source_sink_csv(csv_fn2)\n if freq=='infer':\n if th1.index.freq!=th2.index.freq:\n print(\"th1 and th2 has different frequency\")\n else:\n th1 = th1.asfreq(freq)\n th2 = th2.asfreq(freq)\n th_merged = th1.join(th2,how=how,rsuffix='r').drop(columns=['datetimer'])\n th_merged = th_merged.fillna(-9999.0)\n cols = np.append(['datetime'],sites)\n th_merged = th_merged[cols] #rearrange the array to have the same order as defined in merged_source_sink_in\n th_merged['datetime'] = np.datetime_as_string(th_merged.index.values,'h')\n write_source_sink_csv(th_merged,csv_merged)", "def recalc_csv(input_path, config_file, family_name, source_string, quiet):\n csv_file = getCsvPath(input_path)\n\n if not config_file:\n config_file = getConfigPath(input_path)\n\n # If config.json doesn't exist, it has to be created before.\n if not os.path.exists(config_file):\n configHandler(config_file).resetConfig()\n click.secho('\\n{} didn\\'t exist and has been created'.format(\n config_file), fg='yellow')\n\n if os.path.exists(csv_file) and not quiet:\n confirmation = click.confirm(\n '\\n{} already exists. 
Do you want to overwrite it?'.format(csv_file))\n if confirmation is True:\n csvHandler(csv_file).recalcCSV(\n config_file=config_file, family_name=family_name, source_string=source_string)\n click.secho('\\n{} created'.format(csv_file), fg='green')\n else:\n # Let's ensure that, if the data.csv file doesn't exist,\n # it is created before recalculation.\n if not os.path.exists(csv_file):\n csvHandler(csv_file).resetCSV(config_file=config_file)\n\n csvHandler(csv_file).recalcCSV(\n config_file=config_file, family_name=family_name, source_string=source_string)\n click.secho('\\n{} created'.format(csv_file), fg='green')", "def create_manifest_file(csv_file, manifest_file, s3_path):\n logger.info(\"Processing CSV file %s.\", csv_file)\n\n image_count = 0\n anomalous_count = 0\n\n with open(csv_file, newline='', encoding=\"UTF-8\") as csvfile,\\\n open(manifest_file, \"w\", encoding=\"UTF-8\") as output_file:\n\n image_classifications = csv.reader(\n csvfile, delimiter=',', quotechar='|')\n\n # Process each row (image) in the CSV file.\n for row in image_classifications:\n # Skip empty lines.\n if not ''.join(row).strip():\n continue\n\n source_ref = str(s3_path) + row[0]\n classification = 0\n\n if row[1].lower() == 'anomaly':\n classification = 1\n anomalous_count += 1\n\n # Create the JSON line.\n json_line = {}\n json_line['source-ref'] = source_ref\n json_line['anomaly-label'] = str(classification)\n\n metadata = {}\n metadata['confidence'] = 1\n metadata['job-name'] = \"labeling-job/anomaly-classification\"\n metadata['class-name'] = row[1]\n metadata['human-annotated'] = \"yes\"\n metadata['creation-date'] = datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%S.%f')\n metadata['type'] = \"groundtruth/image-classification\"\n\n json_line['anomaly-label-metadata'] = metadata\n\n output_file.write(json.dumps(json_line))\n output_file.write('\\n')\n image_count += 1\n\n logger.info(\"Finished creating manifest file %s.\\n\"\n \"Images: %s\\nAnomalous: %s\",\n manifest_file,\n image_count,\n anomalous_count)\n return image_count, anomalous_count", "def import_csv(item):\n (f_csv, f_csv_out, target_column, merge_columns) = item\n has_checked_keys = False\n\n if not merge_columns:\n raise ValueError(\"merge_columns must not be empty\")\n\n with open(f_csv_out, \"w\") as FOUT:\n CSV_HANDLE = None\n total_rows = 0\n\n for row in csv_iterator(f_csv):\n\n output = {\"_ref\": next(_ref_counter)}\n\n if not has_checked_keys:\n for key in merge_columns:\n if key not in row.keys():\n msg = \"Column **{}** not in csv file {}\"\n raise KeyError(msg.format(key, f_csv))\n has_checked_keys = True\n\n if target_column in row.keys():\n msg = \"Generated column **{}** already in csv file {}\"\n raise KeyError(msg.format(target_column, f_csv))\n\n text = []\n for key in merge_columns:\n val = row[key].strip()\n if not val:\n continue\n if val[-1] not in \".?!,\":\n val += \".\"\n text.append(val)\n\n output[target_column] = \"\\n\".join(text).strip()\n\n if CSV_HANDLE is None:\n CSV_HANDLE = csv.DictWriter(FOUT, sorted(output.keys()))\n CSV_HANDLE.writeheader()\n\n CSV_HANDLE.writerow(output)\n total_rows += 1\n\n logger.info(\"Imported {}, {} entries\".format(f_csv, total_rows))", "def check_for_csv(inp_dict):\n if inp_dict[\".csv\"]:\n for path in inp_dict[\".csv\"]:\n csv_path = convert_csv(path)\n inp_dict[\".xls\"].append(csv_path)\n inp_dict[\"del\"].append(csv_path)\n inp_dict[\"out\"].append(csv_path)\n inp_dict[\".csv\"] = []\n return inp_dict", "def formatJSON(csvpath, jsonfilepath):\n\n data = {}\n 
my_list = []\n with open(path) as file:\n csvReader = csv.DictReader(file)\n for csvRow in csvReader:\n\n data = csvRow\n my_list.append(data)\n\n \"\"\"\n\n Write retrieved data into a json file\n NOTE: json file is automatically created when code is run from terminal\n and updates each time it run again.\n \"\"\"\n\n\n with open(jsonfilepath,\"w\") as jsonfile:\n\n jsonfile.write(json.dumps(my_list,indent=4))", "def sync_csv(arg):\n files = os.listdir(arg.input_dir)\n file_map = dict()\n for f in files:\n label = os.path.splitext(f)[0].split('_')\n if len(label) < 2:\n continue\n if file_map.get(label[0], None) is None:\n file_map[label[0]] = dict()\n if label[1] == 'grid':\n file_map[label[0]]['grid'] = f\n else:\n if file_map[label[0]].get('data', None) is None:\n file_map[label[0]]['data'] = dict()\n if file_map[label[0]]['data'].get(label[1], None) is None:\n file_map[label[0]]['data'][label[1]] = []\n file_map[label[0]]['data'][label[1]].append(f)\n tar_name = 't{}'.format(datetime.now().strftime('%Y%m%d'))\n tar_path = os.path.join(arg.output_dir, 'tar', tar_name)\n if not os.path.exists(tar_path):\n os.mkdir(tar_path)\n i = 0\n n = len(file_map)\n for city, v in file_map.items():\n i = i + 1\n print('------ handle city [{}/{}]: {} -------'.format(i, n, city))\n city_csv_path = os.path.join(arg.output_dir, 'csv', city)\n city_sql_path = os.path.join(arg.output_dir, 'sql', city)\n if not os.path.exists(city_csv_path):\n os.mkdir(city_csv_path)\n if not os.path.exists(city_sql_path):\n os.mkdir(city_sql_path)\n grid = v.get('grid', None)\n if grid:\n grid = os.path.splitext(grid)[0]\n print('** handling grid ...')\n tar_sql = os.path.join(tar_path, '{}.sql.gz'.format(grid))\n if os.path.exists(tar_sql):\n print('****** {} exist!'.format(tar_sql))\n else:\n shutil.copyfile(os.path.join(arg.input_dir, '{}.csv'.format(grid)),\n os.path.join(city_csv_path, '{}.csv'.format(grid)))\n trans_grid(city, city_csv_path, city_sql_path)\n print('****** GZIP grid sql')\n with open(os.path.join(city_sql_path, '{}.sql'.format(grid))) as fi, gzip.open(\n os.path.join(tar_path, '{}.sql.gz'.format(grid)), 'wb') as fo:\n fo.write(fi.read().encode())\n data = v.get('data', None)\n if data:\n print('** handling data ...')\n for week, data_files in data.items():\n print('**** week: {}'.format(week))\n tar_detail = os.path.join(tar_path, '{}_{}_detail.sql.tar.gz'.format(city, week))\n if os.path.exists(tar_detail):\n print('****** {} exist!'.format(tar_detail))\n else:\n for data_file in data_files:\n shutil.copyfile(os.path.join(arg.input_dir, data_file), os.path.join(city_csv_path, data_file))\n create_detail(city, week, 30000, city_csv_path, city_sql_path)\n print('****** TAR detail sql')\n with tarfile.open(tar_detail, 'w:gz') as f:\n for city_week_detail in os.listdir(city_sql_path):\n if city_week_detail.startswith('{}_{}_detail'.format(city, week)):\n f.add(os.path.join(city_sql_path, city_week_detail), arcname=city_week_detail)\n print('****** remove csv and sql file...')\n for data_file in data_files:\n os.remove(os.path.join(city_csv_path, data_file))\n sql_files = os.path.join(city_sql_path, '{}_{}_detail*sql'.format(city, week))\n for sql_file in glob.glob(sql_files):\n os.remove(sql_file)", "def loop_csv(input_csv_path, output_csv_path):\n counter = 0\n with open(input_csv_path, 'rb') as read_csvfile:\n projectsreader = csv.DictReader(\n read_csvfile, delimiter=',', quotechar='\"')\n\n with open(output_csv_path, 'w') as write_csvfile:\n fieldnames = ['acronym', 'title', 'projectUrl', 
'foundProjectUrl1',\n 'foundProjectUrl2', 'foundProjectUrl3',\n 'foundProjectUrl4', 'foundProjectUrl5',\n 'foundProjectUrl6', 'foundProjectUrl7',\n 'foundProjectUrl8', 'foundProjectUrl9',\n 'foundProjectUrl10']\n writer = csv.DictWriter(write_csvfile, fieldnames=fieldnames)\n # writer.writeheader() # this method only available at python 2.7\n for row in projectsreader:\n if counter == 100:\n time.sleep(86400) # sleep 1 day\n counter = 0\n\n res = query_google_cse(\n row['acronym'] + \" \" + row['title'] +\n \" project -site:cordis.europa.eu -site:ec.europa.eu\")\n\n # save response to file\n with open('responses_gcse.json', 'w') as outfile:\n json.dump(res, outfile)\n\n # a query response may not have 10 results, so we have to check\n # for that\n results = []\n result_size = res['queries']['request'][0]['totalResults']\n\n print \"INFO: RESULT SIZE %s\" % result_size\n for i in range(10):\n if i < int(result_size):\n results.append(res['items'][i]['link'])\n else:\n results.append('')\n\n # print \"Control Print: \" + res['items'][0]['link']\n print \"INFO: First Result: \" + results[0]\n writer.writerow({\n 'acronym': row['acronym'],\n 'title': row['title'],\n 'projectUrl': row['projectUrl'],\n 'foundProjectUrl1': results[0],\n 'foundProjectUrl2': results[1],\n 'foundProjectUrl3': results[2],\n 'foundProjectUrl4': results[3],\n 'foundProjectUrl5': results[4],\n 'foundProjectUrl6': results[5],\n 'foundProjectUrl7': results[6],\n 'foundProjectUrl8': results[7],\n 'foundProjectUrl9': results[8],\n 'foundProjectUrl10': results[9],\n })\n sys.stdout.flush()\n time.sleep(2)\n counter += 1", "def combine_files(file_name):\n\n\tif file_name == \"train\":\n\n\t\tif os.path.isfile(\"./Data/Level1_model_files/Train/all_level1_train.csv\"):\n\t\t\tos.remove(\"./Data/Level1_model_files/Train/all_level1_train.csv\")\n\n\t\tlist_files = glob(\"./Data/Level1_model_files/Train/*.csv*\")\n\t\tlist_df = []\n\t\tfor f in list_files :\n\t\t\tlist_df.append(pd.read_csv(f))\n\n\t\tfor i in range(1,len(list_df)):\n\t\t\tlist_df[i] = list_df[i].drop([\"Response\", \"Id\"],1)\n\n\t\t# Concat\n\t\tdf_out = pd.concat(list_df, axis=1)\n\t\t# Order columns\n\t\tlist_col = df_out.columns.values.tolist()\n\t\tlist_col = sorted(list_col)\n\t\tlist_col.remove(\"Response\")\n\t\tlist_col.remove(\"Id\")\n\t\tlist_col = [\"Id\"] + list_col + [\"Response\"]\n\t\tdf_out = df_out[list_col]\n\t\tdf_out.to_csv(\"./Data/Level1_model_files/Train/all_level1_train.csv\", index = False)\n\n\telif file_name == \"test\":\n\n\t\tif os.path.isfile(\"./Data/Level1_model_files/Test/all_level1_test.csv\"):\n\t\t\tos.remove(\"./Data/Level1_model_files/Test/all_level1_test.csv\")\n\n\t\tlist_files = glob(\"./Data/Level1_model_files/Test/*.csv*\")\n\t\tlist_df = []\n\t\tfor f in list_files :\n\t\t\tlist_df.append(pd.read_csv(f))\n\n\t\tfor i in range(1,len(list_df)):\n\t\t\tlist_df[i] = list_df[i].drop(\"Id\",1)\n\n\t\t# Concat\n\t\tdf_out = pd.concat(list_df, axis=1)\n\t\t# Order columns\n\t\tlist_col = df_out.columns.values.tolist()\n\t\tlist_col = sorted(list_col)\n\t\tlist_col.remove(\"Id\")\n\t\tlist_col = [\"Id\"] + list_col \n\t\tdf_out = df_out[list_col]\n\t\tdf_out.to_csv(\"./Data/Level1_model_files/Test/all_level1_test.csv\", index = False)", "def init_csv(input_path, config_file, quiet):\n\n if not config_file:\n config_file = getConfigPath(input_path)\n\n if not os.path.exists(config_file) or quiet:\n configHandler(config_file).resetConfig()\n click.secho('\\n{} didn\\'t exist and has been created'.format(\n config_file), 
fg='green')\n\n csv_file = getCsvPath(input_path)\n if not os.path.exists(csv_file) or quiet:\n confirm_overwrite = True\n else:\n confirm_overwrite = click.confirm(\n '{} already exists. Do you want to overwrite it?'.format(csv_file))\n\n if confirm_overwrite:\n if not os.path.exists(config_file):\n configHandler(config_file).resetConfig()\n csvHandler(csv_file).resetCSV(config_file=config_file)\n click.secho('{} created'.format(csv_file), fg='green')", "def importFile(self):\n\n ## Backing up old CSV and JSON files before beginning import operations\n if os.path.isfile(\"text_files/customers.csv\") and os.path.isfile(\"text_files/customers.json\"):\n print(\"\\nCreating a backup of the existing customer .csv and .json files before overwriting\")\n shutil.copy2(\"text_files/customers.csv\", \"text_files/customers.csv.backup\" + str(time.time()))\n shutil.copy2(\"text_files/customers.json\", \"text_files/customers.json.backup\" + str(time.time()))\n\n ## Importing the text file for cleaning then converting to CSV\n input_file = open(\"text_files/customer_export.txt\", \"r\")\n output_file = open(\"text_files/customers.csv\", \"w\")\n\n ## A loop to clean and write the customer_export txt file to a CSV\n for line in input_file:\n clean_text = \"\"\n check_line = line.replace(\"#\", \"\").replace(\",,\",\"\").split(\"|\")\n for line in check_line:\n if line != check_line[10]:\n clean_text += line + \",\"\n elif line == check_line[10]:\n clean_text += line + \"\\n\"\n output_file.write(clean_text)\n\n ## Closing TXT file and CSV file after formatting\n input_file.close()\n output_file.close()\n\n ## Opening the cleaned CSV file for conversion to Json\n with open('text_files/customers.csv') as clean_csv:\n ## Converting CSV file to Json\n converted = csv.DictReader(clean_csv)\n rows = list(converted)\n\n ## Writing converted CSV to Json file\n with open('text_files/customers.json', 'w') as convert:\n json.dump(rows, convert)\n\n ## Deleting all data currently in database before importing new file\n db_connection.executeQuery(\"DELETE FROM CRM;DBCC CHECKIDENT ('CRM', RESEED, 0) DELETE FROM Mailings; DBCC CHECKIDENT ('Mailings', RESEED, 0) COMMIT\") \n\n ## Loading the newly created Json file\n with open(\"text_files/customers.json\") as customers_json:\n customers = json.load(customers_json)\n\n ## A loop to add the contents of the Json file to the database \n print(\"Writing imported file to database please wait...\")\n for key in customers:\n db_connection.executeQuery(\"INSERT INTO dbo.CRM (f_name, l_name, company, address, city, county, state, zip, primary_phone, secondary_phone, email_address) VALUES ('\" + key[\"first_name\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"last_name\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"company_name\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"address\"] + \"', '\" + key[\"city\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"county\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"state\"] + \"', '\" + str(key[\"zip\"]) + \"', '\" + key[\"phone1\"] + \"', '\" + key[\"phone2\"] + \"' , '\" + key[\"email\"] + \"'); COMMIT\")\n db_connection.executeQuery(\"INSERT INTO dbo.Mailings (name, company, address) VALUES ('\" + key[\"first_name\"].replace(\"\\'\", \"\\'\\'\") + \" \" + key[\"last_name\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"company_name\"].replace(\"\\'\", \"\\'\\'\") + \"','\" + key[\"address\"] + \" \" + key[\"city\"] + \" \" + key[\"county\"] + \" \" + key[\"state\"] + \" \" + str(key[\"zip\"]) + \"'); COMMIT\") 
\n\n print(\"\\nFinished writing to file. Returning to main menu...\")", "def merge_files():\n # abs path of data folder\n work_folder = os.path.join(CURRENT_FOLDER, \"..\\\\Data\\\\weather_data\\\\KORD\")\n file_list = os.listdir(work_folder)\n with open(os.path.join(work_folder, \"..\\\\merged_history_KORD.csv\"), \"w\") as outfile:\n for line in open(os.path.join(work_folder, file_list[0])):\n outfile.write(line)\n print \"write the first line\"\n for i in range(1, len(file_list)):\n with open(os.path.join(work_folder, file_list[i])) as infile:\n infile.next()\n for line in infile:\n outfile.write(line)", "def ingest_file(input, fields, advanced_operators, output, delimiter=',', quotechar='\"'):\n with open(input, 'rb') as csv_file:\n reader = csv.DictReader(csv_file)\n\n with open(output, 'a') as write_csvfile:\n fieldnames = ['acronym', 'title', 'projectUrl',\n 'foundProjectUrl1', 'foundProjectUrl2',\n 'foundProjectUrl3', 'foundProjectUrl4',\n 'foundProjectUrl5', 'foundProjectUrl6',\n 'foundProjectUrl7', 'foundProjectUrl8',\n 'foundProjectUrl9', 'foundProjectUrl10']\n\n writer = csv.DictWriter(write_csvfile, fieldnames=fieldnames)\n writer.writeheader() # this method only available at python 2.7\n\n search_engine = SearchWeb()\n\n # iterate reader\n for row in reader:\n query_string = str(concatenate(row, fields))\n\n response = search_engine.search(query_string, advanced_operators)\n\n projectsUrl = []\n results_size = len(response)\n\n # TODO print with logger\n print \"INFO: RESULT SIZE - %s\" % results_size\n\n for i in range(10):\n if i < results_size:\n projectsUrl.append(response[i]['Url'])\n else:\n projectsUrl.append('')\n\n # TODO print with logger\n print \"INFO: FIRST RESULT - %s\" % projectsUrl[0]\n writer.writerow(dict(acronym=row['acronym'], title=row['title'], projectUrl=row['projectUrl'],\n foundProjectUrl1=projectsUrl[0], foundProjectUrl2=projectsUrl[1],\n foundProjectUrl3=projectsUrl[2], foundProjectUrl4=projectsUrl[3],\n foundProjectUrl5=projectsUrl[4], foundProjectUrl6=projectsUrl[5],\n foundProjectUrl7=projectsUrl[6], foundProjectUrl8=projectsUrl[7],\n foundProjectUrl9=projectsUrl[8], foundProjectUrl10=projectsUrl[9]))", "def convert_csv_to_alfed(self) -> None:\n global output_path, file_name\n self.parse_command_line_args()\n self.validate_command_line_args()\n\n for _, _, files in walk(self.args.input):\n for output_file in files:\n if output_file.endswith(\".csv\"):\n file_name, _ = path.splitext(output_file)\n output_path = \"\"\n output_path = path.join(self.args.output, file_name)\n\n try:\n mkdir(output_path)\n print(f\"Creating folder {output_path}...\")\n except OSError:\n print(f\"Creation of directory {output_path} failed\")\n\n with open(path.join(self.args.input, output_file), \"rt\") as csv_file:\n reader = DictReader(csv_file, fieldnames=self.args.fieldorder)\n\n for row in reader:\n uid = str(uuid.uuid1()).upper()\n row[\"content\"] = self.replace_embedded_snipptes(row[\"content\"], self.args.lplaceholder,\n self.args.rplaceholder, self.args.changeplaceholders)\n output = dumps(\n {\n \"alfredsnippet\": {\n \"snippet\": row['content'],\n \"uid\": uid,\n \"name\": row['name'],\n \"keyword\": row['abbreviation']\n }\n },\n sort_keys=False, indent=4,\n separators=(',', ': ')\n )\n\n output_file = f\"{row['name']}_[{uid}].json\"\n target = path.join(output_path, output_file)\n f = open(target, \"w\")\n f.write(output)\n f.close()\n print(f\"Writing file {target}...\")\n else:\n self.error_msg(\"The files in the input folder are not with extension 
'*.csv'\")\n\n subprocess.call(\n [\n 'ditto',\n '--norsrc',\n '-ck',\n output_path,\n self.args.output + \"/\" + file_name + \".alfredsnippets\"\n ]\n )\n print(f\"{self.args.output}/{file_name}.alfredsnippets was created\")\n self.created_folders.append(file_name)\n\n self.remove_temp_folders()", "def main(input_filepath, output_filepath, data_type):\n a = jsonCSV(input_filepath, os.path.join(output_filepath, data_type+'.csv'))\n column_names = a.get_superset_column_names()\n a.read_write(column_names)\n\n logger = logging.getLogger(__name__)\n logger.info('transform log files into csv')", "def clean_file(csv_file):\n my_list = []\n with open(csv_file, newline='') as csvfile:\n file_reader = csv.reader(csvfile, delimiter=',', quotechar=\" \")\n for row in file_reader:\n my_list.append(row)\n\n \"\"\"\n > Part Two\n Input: Nested list csv_table and a string file_name\n Action: Write fields in csv_table into a comma-separated CSV file with the name file_name\n Mutates output: Yes\n \"\"\"\n with open(csv_file, 'w', newline='') as csvfile:\n my_csv_writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_NONE)\n for row in my_list:\n row2 = []\n for item in row:\n a = item.lstrip('\"')\n b = a.rstrip('\"')\n row2.append(b)\n my_csv_writer.writerow(row2)", "def csv_to_json(csv_filename):\n csv_trimmed = csv_filename[:-3]\n json_added = csv_trimmed + 'json'\n return json_added", "def _load_single_file(self, table_name, manifest_row, csv_reader,\n temp_filepath):\n # get database interface and it's equivalent manifest row\n sql_interface = self._configure_db_interface(\n manifest_row=manifest_row, temp_filepath=temp_filepath)\n\n sql_manifest_row = sql_interface.get_sql_manifest_row()\n\n cleaner = self._get_cleaner(table_name=table_name,\n manifest_row=manifest_row)\n csv_writer = CSVWriter(meta=self.meta,\n manifest_row=manifest_row,\n filename=temp_filepath)\n\n # clean the file and save the output to a local pipe-delimited file\n # if it doesn't have a 'loaded' status in the database manifest\n if csv_reader.should_file_be_loaded(sql_manifest_row=sql_manifest_row):\n print(\" Cleaning...\")\n meta_only_fields = self._get_meta_only_fields(\n table_name=table_name, data_fields=csv_reader.keys)\n for idx, data_row in enumerate(csv_reader):\n data_row.update(meta_only_fields) # insert other field dict\n clean_data_row = cleaner.clean(data_row, idx)\n if clean_data_row is not None:\n csv_writer.write(clean_data_row)\n\n csv_writer.close()\n\n # write the data to the database\n self._update_database(sql_interface=sql_interface)\n\n if not self._keep_temp_files:\n csv_writer.remove_file()", "def writeCSV(csvPath, usedmpicommands, first_table_values,second_table_values,third_table_values, df):\n\n print(\"Saving CSV files in directory '\" + os.path.realpath(csvPath) +\"'\")\n\n #routine Summary by rank metrics table\n metric_csv_table = df.to_csv(sep=';')\n with open(os.path.join(csvPath,'routineSummaryByRank_metric_table.csv'), 'w') as outfileMetricTable:\n outfileMetricTable.write(metric_csv_table)\n outfileMetricTable.close()\n\n #routine Summary by rank data table (just the data from the instrumenation file in csv format)\n with open(os.path.join(csvPath,'routineSummaryByRank_summary.csv'), 'w') as outfileMPICommands:\n wr = csv.writer(outfileMPICommands, delimiter=';')\n wr.writerows(usedmpicommands)\n outfileMPICommands.close()\n\n #application Summary by rank data (first table)\n #Columns: \"Rank\",\"Proc CPU Time\",\"User Portion\", \"User Portion in Percent\", \"System Portion\", 
\"System Portion in Percent\"\n with open(os.path.join(csvPath,'applicationSummaryByRank_1st_table.csv'), 'w') as outfile_first_table:\n wr = csv.writer(outfile_first_table, delimiter=';')\n wr.writerow([\"Rank\",\"Proc CPU Time\",\"User Portion\", \"User Portion in Percent\", \"System Portion\", \"System Portion in Percent\"])\n wr.writerows(first_table_values)\n outfile_first_table.close()\n \n #application Summary by rank data (second table) \n #Columns: \"Rank\",\"Proc Wall Time\",\"User\" , \"User in Percent\",\"MPI\", \"MPI in Percent\"\n with open(os.path.join(csvPath,'applicationSummaryByRank_2st_table.csv'), 'w') as outfile_second_table:\n wr = csv.writer(outfile_second_table, delimiter=';')\n wr.writerow([\"Rank\",\"Proc Wall Time\",\"User\" , \"User in Percent\",\"MPI\", \"MPI in Percent\"])\n wr.writerows(second_table_values)\n outfile_second_table.close()\n\n #application Summary by rank data (third table)\n #Columns: \"Rank\",\"Proc MPI Time\",\"Overhead\", \"Overhead in Percent\",\"Blocking\", \"Blocking in Percent\"\n with open(os.path.join(csvPath,'applicationSummaryByRank_3rd_table.csv'), 'w') as outfile_third_table:\n wr = csv.writer(outfile_third_table, delimiter=';')\n wr.writerow([\"Rank\",\"Proc MPI Time\",\"Overhead\", \"Overhead in Percent\",\"Blocking\", \"Blocking in Percent\"])\n wr.writerows(third_table_values)\n outfile_third_table.close()\n\n #In case, you are wondering, where the last part of the instrumentation file is (message Summary by rank),\n #it is currently not saved as a csv file. This is because:\n #\n #1st: In the platform_mpi instrumentation file, the data is somehow visualized beautifully\n #2nd: It is very hard to save the data in a 2-dimensional csv file format\n #Therefore we decided, not to export this data in a csv file format" ]
[ "0.66875005", "0.65311", "0.64833546", "0.6277665", "0.6257637", "0.5997841", "0.5968701", "0.58972067", "0.5896129", "0.5890339", "0.58322793", "0.5827144", "0.580506", "0.57693833", "0.5767864", "0.5731467", "0.57120144", "0.56666887", "0.56376743", "0.5627514", "0.5622011", "0.55655795", "0.5558454", "0.55536425", "0.5543435", "0.5540802", "0.55283374", "0.5519627", "0.5484722", "0.54805857" ]
0.69841707
0
Description When is given a csv_filepath and output_filepath and one of the columns has blank character Expected Result creates a json file ignoring blank column
def test_blank_column(self): # Create a temporary directory for test files temp_dir = "test_files/observed" os.makedirs(temp_dir, exist_ok=True) # Create a test CSV file csv_filepath = os.path.join(temp_dir, "Abadia-BA_-11.56_-37.52.csv") with open(csv_filepath, "w", newline="") as csv_file: writer = csv.writer(csv_file, delimiter=";") writer.writerow(["periods", "precipitation", "temperature", ""]) writer.writerow(["2023-01-01", "5", "25", ""]) writer.writerow(["2023-01-02", "10", "23", ""]) # Define the expected output JSON file path expected_output_filepath = os.path.join(temp_dir, "BA_Abadia.json") # Call the function under test extractor.csv_to_json(csv_filepath, temp_dir) # Verify that the output JSON file exists assert os.path.exists(expected_output_filepath) # Load the output JSON file with open(expected_output_filepath, "r") as json_file: json_data = json.load(json_file) # Verify the contents of the JSON file expected_data = { "city": "Abadia", "state": "BA", "coordinates": ["-11.56", "-37.52"], "observed": { "periods": ["2023-01-01", "2023-01-02"], "precipitation": ["5", "10"], "temperature": ["25", "23"] } } assert json_data == expected_data # Clean up the temporary directory and files os.remove(csv_filepath) os.remove(expected_output_filepath) os.rmdir(temp_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_to_csv_with_no_rows_returns_none(self):\n output = row_handling.to_csv(rows=[], csv_path=self.csv_path)\n assert output is None", "def write_csv_file(csv_output_file, full_data):\n j = 0\n csv_file_path = make_dir(csv_output_file)\n\n # csv_file_path = os.path.join(csv_file_path, csv_output_file)\n try:\n with open(csv_file_path, 'w', newline='') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',')\n csvwriter.writerow(['tripId', 'agency_tripId', 'itinerary_nb', 'modes', 'actual_time', 'perceived_time',\n 'start_time', 'end_time', 'walk_time', 'walk_distance','transit_time', 'waiting_time',\n 'boardings', 'bus_lines_numbers', 'boarding_stop_ids', 'debarquer_stop_ids'])\n print(\"======================================\")\n print(\"= Creating CSV file from JSON files =\")\n print(\"======================================\")\n for id in full_data.keys(): # just so we can get all the ids\n data = full_data[id]\n j += 1\n\n printrp('( ' + str(j) + ' / ' + str(len(full_data) - 1) + ' )') if found_CmdPrinter else print(j)\n\n if 'error' in data:\n # if no itineraries were find (ie. there was an error), write the error id and error message\n # note : msg is the short message (eg. PATH_NOT_FOUND), message is the long description\n csvwriter.writerow([id] + ['error'] + [str(data['error']['id'])] +\n [str(data['error']['message'])] + [str(data['error']['msg'])])\n else:\n for itinerary_nb in range(len(data['plan']['itineraries'])):\n\n boarding = 0\n busNbs = \"\"\n boarding_stop_ids = \"\"\n debarquer_stop_ids = \"\"\n agency_trip_ids = \"\"\n modes = \"\"\n for leg in data['plan']['itineraries'][itinerary_nb]['legs']:\n modes += leg['mode'] + ';'\n if leg['mode'] == 'BUS':\n # every time a BUS step is included in the itineraries :\n # add 1 to the boarding counter\n # add the bus line number to busNbs\n # add the stop_ids to boarding_stop_ids and debarquer_stop_ids\n boarding += 1\n busNbs += leg['route'] + \";\"\n\n boarding_stop_ids += str(leg['from']['stopCode']) + ';'\n debarquer_stop_ids += str(leg['to']['stopCode']) + ';'\n agency_trip_ids += str(leg['tripId'].split(':')[1]) + ';'\n # we need to .split that line because tripId is given as agencyId:tripId\n\n\n busNbs = busNbs[:-1] # removing the trailing semi-colon\n boarding_stop_ids = boarding_stop_ids[:-1]\n debarquer_stop_ids = debarquer_stop_ids[:-1]\n agency_trip_ids = agency_trip_ids[:-1]\n modes = modes[:-1]\n startTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(data['plan']['itineraries'][itinerary_nb]['startTime']/1000))\n endTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(data['plan']['itineraries'][itinerary_nb]['endTime']/1000))\n # those are /1000 because OTP gives Epoch time in milliseconds\n\n walkTime = data['plan']['itineraries'][itinerary_nb]['walkTime']\n transitTime = data['plan']['itineraries'][itinerary_nb]['transitTime']\n waitingTime = data['plan']['itineraries'][itinerary_nb]['waitingTime']\n\n # Write all the information inside a csv file\n csvwriter.writerow([id,\n str(agency_trip_ids),\n str(itinerary_nb+1),\n str(modes),\n str(data['plan']['itineraries'][itinerary_nb]['duration']),\n str(get_perceived_time(walkTime, transitTime, waitingTime)),\n str(startTime),\n str(endTime),\n str(walkTime),\n str(data['plan']['itineraries'][itinerary_nb]['walkDistance']),\n str(transitTime),\n str(waitingTime),\n str(boarding),\n str(busNbs),\n str(boarding_stop_ids),\n str(debarquer_stop_ids)])\n except PermissionError:\n print('ERROR - Cannot write to CSV file. 
The file might be used by another app.')\n exit()\n except OSError:\n print(\"ERROR - Couldn't open file \" + csv_file_path + \". Please verify the file's permissions.\")\n print('( ' + str(j-1) + ' / ' + str(len(full_data) - 1) + ' )')", "def create_json_from_csv(csv_file, delimiter, cols_delimiter, keep, dic_types, infer_types, max_docs, json_file, per_line):\n\n # Get header of csv\n header_csv = get_header_csv(csv_file, cols_delimiter)\n\n # Create structure of json\n print(' [INFO] Creating json\\'s structure')\n jstruct = create_json_structure(header_csv, delimiter)\n print(jstruct)\n # Read csv line by line and create list of json\n print(' [INFO] Filling json') \n js_content = []\n with open(csv_file, 'r') as f:\n reader = csv.DictReader(f, delimiter=cols_delimiter)\n i = 0\n beg = True\n end = True\n # Prepare output file if dump in one file\n if max_docs == -1 and not per_line:\n beg = False\n end = False\n with open(json_file, 'w') as jsf:\n jsf.write('[\\n')\n for row in reader:\n if infer_types:\n row = {x: infer_type(row[x]) for x in row}\n jexample = copy.deepcopy(jstruct)\n js_content.append(create_json_example(row, header_csv, jexample, delimiter, keep, dic_types))\n\n i += 1\n # Dump json in streaming\n if (max_docs == -1) and ((i % 10000) == 0):\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)\n js_content = []\n elif (max_docs != -1) and (i % max_docs) == 0:\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)\n js_content = []\n\n # Dump last jsons\n if js_content:\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, True)\n\n print(' [INFO] Json{} successfully created and dumped'.format('s' if (max_docs != -1) else ''))\n\n return", "def csv_to_json(csv_filename):\n csv_trimmed = csv_filename[:-3]\n json_added = csv_trimmed + 'json'\n return json_added", "def test_csv_to_json():\r\n json_dict = {\r\n \"covariates\":{ \r\n \"value\":{\r\n \"subject0\": {\r\n \"attribute0\": 3.0,\r\n \"attribute1\": 12.0\r\n },\r\n \"subject1\": {\r\n \"attribute0\": 1.2,\r\n \"attribute1\": 10.9\r\n }\r\n }\r\n },\r\n \"data\":{\r\n \"fulfilled\": True,\r\n \"value\": {\r\n \"type\": [\"float\"],\r\n \"value\": [\r\n \"attribute0\",\r\n \"attribute1\"\r\n ]\r\n }\r\n },\r\n \"lambda\":{\r\n \"fulfilled\": True,\r\n \"value\": 0\r\n }\r\n }\r\n json_string = \"[\" + json.dumps(json_dict).replace(' ', '').replace('\\n', '') + \"]\"\r\n directory = os.path.join(os.getcwd(), \"test/\")\r\n lambda_ = \"0\"\r\n data_type = [\"float\"]\r\n data_vars = [\"attribute0\", \"attribute1\"]\r\n assert csv_to_json_(directory, lambda_, data_type, data_vars).replace(' ', '').replace('\\n', '') == json_string", "async def collate_similar_data(input_csv_file_path, output_csv_file_path):\n if not input_csv_file_path or not output_csv_file_path:\n return\n with open(output_csv_file_path, 'w') as file_object:\n csv_writer = csv.writer(file_object, delimiter=',')\n csv_writer.writerow(\n ('Account ID', 'First Name', 'Created On', 'Status',\n 'Status Set On'))\n for csv_row in read_csv_file(input_csv_file_path):\n account_status = (await fetch_account_status(csv_row[0]))\n csv_writer.writerow(csv_row + (\n account_status.get('status', ''),\n datetime.datetime.strftime(\n datetime.datetime.strptime(\n account_status.get('created_on'), '%Y-%m-%d'),\n '%Y-%m-%d') if account_status.get('created_on') else ''))", "def initial_csv_wrangling(csv_file):\n df = pd.read_csv(csv_file)\n df = df.fillna('')\n columns = list(df.columns)\n\n # check that 
\"url\" column exists (required)\n if 'url' not in columns:\n raise Exception('Input csv file requires a \"url\" column, which does not seem to exist. Exiting.')\n\n # check if \"pos_concepts\" column exists and parse accordingly (not required)\n if 'pos_concepts' in columns:\n print('Found \"pos_concepts\" column. Values will be split by pipe/vertical bar \"|\" into a python list.')\n df['pos_concepts'] = df['pos_concepts'].map(lambda x: list(set(x.split('|'))))\n\n # check if \"neg_concepts\" column exists and parse accordingly (not required)\n if \"neg_concepts\" in columns:\n print('Found \"neg_concepts\" column. Values will be split by pipe/vertical bar \"|\" into a python list.')\n df['neg_concepts'] = df['neg_concepts'].map(lambda x: list(set(x.split('|'))))\n\n # check if \"metadata\" column exists and load accordingly (not required)\n if \"metadata\" in columns:\n print('Found \"metadata\" column. Attempting to ingest.')\n try:\n df['metadata'] = df['metadata'].replace('','{}').map(json.loads)\n except:\n raise Exception('Value in \"metadata\" column does not seem to be a properly JSON formatted str.')\n\n return df", "def create_dataset(input_file_path, output_file_path):\n col_index_map = {'user_id': 0, 'session_id': 1, 'timestamp': 2, 'step': 3, 'action_type': 4, 'reference': 5,\n 'platform': 6, 'city': 7, 'device': 8,\n 'current_filters': 9, 'impressions': 10, 'prices': 11}\n flat_dict = dict()\n with open(input_file_path, 'r') as csvFile:\n reader = csv.reader(csvFile)\n header = next(reader)\n col_names = [col_name for col_name in col_index_map.keys()]\n col_names.pop(0)\n index = 0\n for row in tqdm(reader):\n if len(flat_dict) > 40000:\n index += 1\n with open(output_file_path + \"_\" + str(index) + \".json\", \"w\") as file:\n json.dump(flat_dict, file)\n print(\" JSON : \", index)\n flat_dict = dict()\n col_values = [row[col_index_map[c_n]] for c_n in col_names]\n dict_for_each_row = dict(zip(col_names, col_values))\n to_list = dict_for_each_row['impressions']\n dict_for_each_row['impressions'] = to_list.split('|')\n to_list = dict_for_each_row['prices']\n dict_for_each_row['prices'] = to_list.split('|')\n user_id = row[col_index_map['user_id']]\n if user_id in flat_dict:\n flat_dict[user_id].append(dict_for_each_row)\n else:\n flat_dict[user_id] = [dict_for_each_row]\n\n print(\"Output is Saved\")", "def init_csv_file(csv_path):\n with open(csv_path, 'w', newline='') as csv_file:\n writer = csv.writer(csv_file)\n header = ['file_name', 'chart_in_file',\n 'year', 'month', 'row_no', 'bird_species']\n header += list(range(1, 32))\n writer.writerow(header)", "def check_for_csv(inp_dict):\n if inp_dict[\".csv\"]:\n for path in inp_dict[\".csv\"]:\n csv_path = convert_csv(path)\n inp_dict[\".xls\"].append(csv_path)\n inp_dict[\"del\"].append(csv_path)\n inp_dict[\"out\"].append(csv_path)\n inp_dict[\".csv\"] = []\n return inp_dict", "def obs_csv2json(input_file,output_file,example_path,instrument):\r\n\r\n obs_path = Path(cfg.obs_path)\r\n \r\n with open(example_path,'r') as e:\r\n example = js.load(e)\r\n \r\n #deleting unused categories\r\n del(example['sep_forecast_submission']['forecasts'])\r\n del(example['sep_forecast_submission']['triggers'][2])\r\n del(example['sep_forecast_submission']['triggers'][1])\r\n del(example['sep_forecast_submission']['triggers'][0])\r\n del(example['sep_forecast_submission']['triggers'][0]['particle_intensity']['instrument'])\r\n del(example['sep_forecast_submission']['triggers'][0]['particle_intensity']['last_data_time'])\r\n 
del(example['sep_forecast_submission']['contacts'])\r\n del(example['sep_forecast_submission']['model'])\r\n del(example['sep_forecast_submission']['issue_time'])\r\n \r\n example['sep_forecast_submission']['mode'] = 'observation'\r\n\r\n #json template for observations\r\n obs_json = example\r\n\r\n fieldnames = ('energy_threshold','flux_threshold','start_time','intensity',\r\n 'peak_time','rise_time','end_time','duration','fluence>10',\r\n 'fluence>100')\r\n\r\n #extracting data from csv file\r\n with open(input_file,'r') as f:\r\n reader = csv.DictReader(f, fieldnames)\r\n out = js.dumps( [ row for row in reader ] )\r\n\r\n obs_data = js.loads(out)\r\n\r\n data={}\r\n (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['observatory']) = instrument\r\n\r\n #creating data for all energy levels forecast\r\n for j in range(1,len(obs_data)):\r\n data[j-1]=obs_data[j]\r\n\r\n #recording start and end times for all events\r\n for i in range(len(data)):\r\n data[i]['start_time'] = datetime.strptime(data[i]['start_time'],'%Y-%m-%d %H:%M:%S')\r\n data[i]['start_time'] = data[i]['start_time'].isoformat()\r\n data[i]['end_time'] = datetime.strptime(data[i]['end_time'],'%Y-%m-%d %H:%M:%S')\r\n data[i]['end_time'] = data[i]['end_time'].isoformat()\r\n data[i]['peak_time'] = datetime.strptime(data[i]['peak_time'],'%Y-%m-%d %H:%M:%S')\r\n data[i]['peak_time'] = data[i]['peak_time'].isoformat()\r\n \r\n #recording observed values for all events\r\n if i > 0:\r\n (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events']).append({})\r\n\r\n event = (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events'][i])\r\n \r\n #start and end times\r\n event['start_time']=data[i]['start_time']\r\n event['threshold'] = data[i]['flux_threshold']\r\n event['energy_min'] = float(data[i]['energy_threshold'][1:])\r\n event['energy_max'] = -1\r\n event['end_time']=data[i]['end_time']\r\n\r\n #peak values\r\n event['peak_intensity']=data[i]['intensity']\r\n event['peak_time'] = data[i]['peak_time']\r\n event['intensity_units']='pfu'\r\n \r\n #fluence values\r\n event['fluence'] = [{'energy_min' : '10','fluence_value' : 'fluence_value',\r\n 'units' : 'MeV [cm^-2]'},\r\n {'energy_min' : '100', 'fluence_value' : 'fluence_value',\r\n 'units' : 'MeV [cm^-2]'}]\r\n event['fluence'][0]['fluence']=data[i]['fluence>10']\r\n event['fluence'][1]['fluence']=data[i]['fluence>100']\r\n\r\n\r\n if float(event['peak_intensity']) >= cfg.pfu_threshold[cfg.energy_threshold.index\r\n (int(event['energy_min']))]:\r\n event['all_clear_boolean'] = 'false'\r\n\r\n else:\r\n event['all_clear_boolean'] = 'true'\r\n\r\n\r\n #building json file\r\n with open(obs_path / output_file, 'w') as s:\r\n js.dump(obs_json,s,indent=1)\r\n print('json file %s created' %output_file)\r\n \r\n return", "def test_first_time_reading_csv_file(self):\n\n # Create a temporary directory for test files\n temp_dir = \"test_files/observed\"\n os.makedirs(temp_dir, exist_ok=True)\n\n # Create a test CSV file\n csv_filepath = os.path.join(temp_dir, \"Abadia-BA_-11.56_-37.52.csv\")\n with open(csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n\n # Define the expected output JSON file path\n expected_output_filepath = 
os.path.join(temp_dir, \"BA_Abadia.json\")\n\n # Call the function under test\n extractor.csv_to_json(csv_filepath, temp_dir)\n\n # Verify that the output JSON file exists\n assert os.path.exists(expected_output_filepath)\n\n # Load the output JSON file\n with open(expected_output_filepath, \"r\") as json_file:\n json_data = json.load(json_file)\n\n # Verify the contents of the JSON file\n expected_data = {\n \"city\": \"Abadia\",\n \"state\": \"BA\",\n \"coordinates\": [\"-11.56\", \"-37.52\"],\n \"observed\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n }\n }\n assert json_data == expected_data\n\n # Clean up the temporary directory and files\n os.remove(csv_filepath)\n os.remove(expected_output_filepath)\n os.rmdir(temp_dir)", "def csv_to_json(csv_file_path: str, json_file_path: str):\n fieldnames = ('last_name', 'first_name', 'second_name')\n\n # read csv file\n try:\n with open(Path(csv_file_path)) as csv_file:\n csv_reader = csv.DictReader(csv_file, fieldnames)\n csv_data = {num: row for num, row in enumerate(csv_reader, start=1)}\n except FileNotFoundError as err:\n raise CustomException() from err\n\n # generate json\n try:\n with open(Path(json_file_path), 'w') as json_file:\n json.dump(csv_data, json_file, indent=2)\n except OSError as err:\n raise CustomException() from err", "def formatJSON(csvpath, jsonfilepath):\n\n data = {}\n my_list = []\n with open(path) as file:\n csvReader = csv.DictReader(file)\n for csvRow in csvReader:\n\n data = csvRow\n my_list.append(data)\n\n \"\"\"\n\n Write retrieved data into a json file\n NOTE: json file is automatically created when code is run from terminal\n and updates each time it run again.\n \"\"\"\n\n\n with open(jsonfilepath,\"w\") as jsonfile:\n\n jsonfile.write(json.dumps(my_list,indent=4))", "def test_csv_no_callback(self):\n csvfile = testdata.create_csv({\n \"foo\": testdata.get_int(),\n \"bar\": testdata.get_words(),\n })\n self.assertEqual(1, len(csvfile))", "def csv_write (data):\n \n csv_data=data[0:]\n csv1_data = open('backup.csv', 'a')\n csvwriter = csv.writer(csv1_data)\n\n count = 0\n\n for i in csv_data:\n if count == 0:\n header = i.keys()\n csvwriter.writerow(header)\n count += 1\n csvwriter.writerow(i.values())\n\n csv1_data.close()\n\n #http://blog.appliedinformaticsinc.com/how-to-parse-and-convert-json-to-csv-using-python/", "def main(input_filepath, output_filepath, data_type):\n a = jsonCSV(input_filepath, os.path.join(output_filepath, data_type+'.csv'))\n column_names = a.get_superset_column_names()\n a.read_write(column_names)\n\n logger = logging.getLogger(__name__)\n logger.info('transform log files into csv')", "def create_json_example(row, header_csv, jstruct, delimiter, keep, dic_types):\n\n for key in header_csv:\n key_struct = key.split(delimiter)\n if key in dic_types.keys():\n # if no value indicated set to default\n if row[key] == '' and 'default' in dic_types[key].keys():\n row[key] = dic_types[key]['default']\n else:\n try:\n # Cast to indicated type\n row[key] = dic_types[key]['type'](row[key]) \n except:\n print(\" [WARN] Can not parse \", row[key] , \"to type\", dic_types[key]['type'])\n jstruct.update(update_jstruct(jstruct, key_struct, row[key], keep))\n \n return jstruct", "def write_to_csv(path,data_dict):\n\n\n schema = [\"file_name\",\"family\",\"genus\",\"genus_confidence\",\n \"species_1\",\"confidence_1\",\"hall_1\",\n \"species_2\",\"confidence_2\",\"hall_2\",\n 
\"species_3\",\"confidence_3\",\"hall_3\",\n \"species_4\",\"confidence_4\",\"hall_4\",\"peaks\"]\n\n # if no file exists create a one and inform the user\n if not os.path.exists(path):\n print(\"creating new output file {}\".format(path))\n with open(path, \"w\") as csv_file:\n filewriter = csv.writer(csv_file, delimiter=\",\")\n filewriter.writerow(schema)\n\n row = []\n\n row.append(data_dict[\"file_name\"])\n row.append(data_dict[\"family\"])\n \n row.append(data_dict[\"genus_1\"])\n row.append(data_dict[\"genus_confidence_1\"][:5])\n \n row.append(data_dict[\"species_1\"])\n row.append(data_dict[\"confidence_1\"][:5])\n row.append(data_dict[\"hall_1\"])\n \n row.append(data_dict[\"species_2\"])\n row.append(data_dict[\"confidence_2\"][:5])\n row.append(data_dict[\"hall_2\"])\n\n row.append(data_dict[\"species_3\"])\n row.append(data_dict[\"confidence_3\"][:5])\n row.append(data_dict[\"hall_3\"])\n\n row.append(data_dict[\"species_4\"])\n row.append(data_dict[\"confidence_4\"][:5])\n row.append(data_dict[\"hall_4\"])\n\n row.append(data_dict[\"peaks\"])\n \n with open(path, \"a\") as csv_file:\n filewriter = csv.writer(csv_file, delimiter=\",\")\n filewriter.writerow(row)", "def test_when_file_already_exist(self):\n\n # Create a temporary directory for test files\n temp_dir = [\"test_files/observed\", \"test_files/forecast\", \"test_files/output\"]\n for dir in temp_dir:\n os.makedirs(dir, exist_ok=True)\n\n # Create the 1st csv file\n first_csv_filepath = os.path.join(temp_dir[0], \"Abadia-BA_-11.56_-37.52.csv\")\n with open(first_csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n \n # Creating the 2nd csv file in different directory\n second_csv_filepath = os.path.join(temp_dir[1], \"Abadia-BA_-11.56_-37.52.csv\")\n with open(second_csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n\n # Define the expected output JSON file path\n expected_output_filepath = os.path.join(temp_dir[2], \"BA_Abadia.json\")\n\n # Call the function under test\n extractor.csv_to_json(first_csv_filepath, temp_dir[2])\n extractor.csv_to_json(second_csv_filepath, temp_dir[2])\n\n # Verify that the output JSON file exists\n assert os.path.exists(expected_output_filepath)\n\n # Load the output JSON file\n with open(expected_output_filepath, \"r\") as json_file:\n json_data = json.load(json_file)\n\n # Verify the contents of the JSON file\n expected_data = {\n \"city\": \"Abadia\",\n \"state\": \"BA\",\n \"coordinates\": [\"-11.56\", \"-37.52\"],\n \"observed\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n },\n \"forecast\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n },\n }\n\n # Assertion\n assert json_data == expected_data\n\n # Clean up the temporary directory and files\n os.remove(first_csv_filepath)\n os.remove(second_csv_filepath)\n 
os.remove(expected_output_filepath)\n for dir in temp_dir:\n os.rmdir(dir)", "def clean_file(csv_file):\n my_list = []\n with open(csv_file, newline='') as csvfile:\n file_reader = csv.reader(csvfile, delimiter=',', quotechar=\" \")\n for row in file_reader:\n my_list.append(row)\n\n \"\"\"\n > Part Two\n Input: Nested list csv_table and a string file_name\n Action: Write fields in csv_table into a comma-separated CSV file with the name file_name\n Mutates output: Yes\n \"\"\"\n with open(csv_file, 'w', newline='') as csvfile:\n my_csv_writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_NONE)\n for row in my_list:\n row2 = []\n for item in row:\n a = item.lstrip('\"')\n b = a.rstrip('\"')\n row2.append(b)\n my_csv_writer.writerow(row2)", "def merge_csv_initial(output_filename, path):\n\n prefix = ['ParticipantID',\n 'igtb.datatime',\n 'igtb.timezone']\n\n names = ['irb',\n 'itp',\n 'ocb',\n 'inter.deviance',\n 'org.deviance',\n 'shipley.abs',\n 'shipley.vocab',\n 'neuroticism',\n 'conscientiousness',\n 'extraversion',\n 'agreeableness',\n 'openness',\n 'pos.affect',\n 'neg.affect',\n 'stai.trait',\n 'audit',\n 'gats.quantity',\n 'ipaq',\n 'psqi',\n 'gats.status']\n\n\n \n\n #b = np.loadtxt(path + names[0] + '.csv', delimiter=\",\", skiprows=1, usecols=(0, 1, 2), dtype=object)\n #a = np.array(b, dtype=object)\n\n for i,n in enumerate(names):\n file = path + n + '.csv'\n if(i==0):\n df = pd.read_csv(file, sep=',', index_col=0,usecols=[0,1,2,3]) \n df_all = df\n else:\n df = pd.read_csv(file, sep=',', index_col=0,usecols=[0,3]) \n df_all=pd.concat([df_all,df],axis=1)\n \n df_all=df_all.reset_index() \n a = df_all.as_matrix()\n\n # column_format = '%20s %10s %10s %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f'\n # column_format = '%20s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s'\n column_format = '%20s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s'\n names_string = ','.join(prefix + names)\n\n print(a.shape)\n\n np.savetxt(output_filename, a, delimiter=\",\", fmt=column_format, comments='', header=names_string)\n\n return output_filename", "def prepare_out_csv(output_dir, filename):\n out_columns_pi = ['fasta_file', 'acc.code',\n 'organism', 'EC.code', 'species',\n 'note', 'pi', 'modification', 'category']\n string = ''\n for i in out_columns_pi:\n if i == out_columns_pi[-1]:\n string += i\n else:\n string += i+','\n string += '\\n'\n with open(output_dir+filename, 'w') as f:\n f.write(string)", "def _setup_output_file(self):\n\n columns = [\"Hero file\",\n \"Test type\",\n \"Name of tested entry\",\n \"Misc dice sum input\",\n \"Value of tested entry\",\n \"Modifier\",\n \"Values of related attributes\",\n \"Rolls\",\n \"Result\",\n \"Description\",\n \"Timestamp\",\n \"Type of dice input\"]\n\n # if file does not exist, add first row of column names\n if not os.path.isfile(self._result_csv):\n with open(self._result_csv, \"w\", encoding=\"utf-8\") as csv_file:\n file_writer = csv.writer(csv_file, delimiter=',',\n quotechar='|',\n quoting=csv.QUOTE_MINIMAL)\n file_writer.writerow(columns)\n return True\n return False", "def test_write_csv_file(self, tmpdir):\n filename = tmpdir.join(\"output.csv\").strpath\n\n csv_formatter = CSVFormatter(fmt_str=\"${aaa},${bbb},${ccc}\", header=\"# Custom header line\")\n csv_formatter.to_csv(self.records, path_or_buf=filename)\n\n csv = open(filename).read()\n csv_expected = 
textwrap.dedent(\"\"\"\\\n # Custom header line\n foobar_01,8,4898FE19\n foobar_02,160,5825D187\n foobar_03,99,3648A436\n \"\"\")\n\n assert csv == csv_expected", "def test_csv_row_bug(script_runner, tmpdir, test_dir):\n csv_file = tmpdir / 'csv_file.csv'\n\n ret = script_runner.run(\n 'mwcp-tool', '-p', 'foo', '-c', str(csv_file), str(test_dir), cwd=str(tmpdir))\n print(ret.stdout)\n print(ret.stderr, file=sys.stderr)\n assert ret.success\n\n assert csv_file.exists()\n\n with csv_file.open('r') as fo:\n reader = csv.reader(fo)\n rows = list(reader)\n assert len(rows) == len(test_dir.listdir()) + 1\n assert rows[0] == ['scan_date', 'inputfilename', 'outputfile.name',\n 'outputfile.description', 'outputfile.md5', 'address', 'debug', 'url']\n for i, row in enumerate(rows[1:]):\n assert row[0] and row[1]\n # Test entries except the timestamp and full file path.\n assert row[2:] == [\n 'fooconfigtest.txt',\n 'example output file',\n '5eb63bbbe01eeed093cb22bb8f5acdc3',\n '127.0.0.1',\n ('[+] File test_{0}.txt identified as Foo.\\n'\n '[+] size of inputfile is 23 bytes\\n'\n '[+] operating on inputfile test_{0}.txt').format(i),\n 'http://127.0.0.1',\n ]", "def test_37_bulk_csv_import_no_column_names(self, Mock, mock):\r\n empty_file = FakeRequest('Foo,Bar,Baz\\n1,2,3', 200,\r\n {'content-type': 'text/plain'})\r\n Mock.return_value = empty_file\r\n self.register()\r\n self.new_application()\r\n app = db.session.query(App).first()\r\n url = '/app/%s/tasks/import?template=csv' % (app.short_name)\r\n res = self.app.post(url, data={'csv_url': 'http://myfakecsvurl.com',\r\n 'formtype': 'csv'},\r\n follow_redirects=True)\r\n task = db.session.query(Task).first()\r\n assert {u'Bar': u'2', u'Foo': u'1', u'Baz': u'3'} == task.info\r\n assert \"1 Task imported successfully!\" in res.data", "def read_csv(self, csv_input):\n # https://stackoverflow.com/a/45063514\n dtypes = {\n 'lat': 'U',\n 'long': 'U'\n }\n csv_data = pd.read_csv(csv_input, encoding='UTF-8', sep=',', na_values=[''], dtype=dtypes)\n\n self.table = csv_data.fillna('').applymap(lambda x: x.strip() if type(x) == str else x)\n self.log.info('Data read from CSV %s' % csv_input)\n #print('Data read from CSV %s' % csv_input)", "def test_csvfile_different_types(fs: FakeFilesystem) -> None:\n contents = '''\"a\"\n1\n2.0\n\"test\"'''\n fs.create_file(\"test.csv\", contents=contents)\n\n adapter = CSVFile(\"test.csv\")\n\n assert adapter.get_columns() == {\n \"a\": String(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n }", "def write_csv(invocations, job_information, out_file, null_string =\"NA\"):\n\n\t# assume every invocation of a task of a certain type takes the same number of input files\n\tnum_input_files = len(job_information[invocations[0]]['input_files'])\n\t#file_attributes = [\"input_file_%s_kb\"%i for i in range(1, num_input_files + 1)]\n\tfile_attributes = [\"host_name\", \"input_file_sum_kb\"]\n\tusage_attributes = ['utime', 'stime', 'maxrss', 'nvcsw', 'nivcsw', 'nswap', 'minflt', ] # 'majflt', 'inblock', 'outblock', 'nsignals', 'msgsnd', 'msgrcv', 'nswap'\n\tload_attributes = [\"min1\", \"min5\", \"min15\"]\n\tprocs_attributes = [\"total\", \"running\", \"sleeping\", \"waiting\", \"vmsize\", \"rss\"]\n\ttask_attributes = [\"total\", \"running\", \"sleeping\", \"waiting\",]\n\tram_attributes = [\"total\", \"free\", \"shared\", \"buffer\",]\n\tswap_attributes = [\"total\", \"free\",]\n\tmachine_attributes_headers = load_attributes + list(map(lambda a: \"procs_\"+a, procs_attributes)) \\\n\t 
\t\t\t\t\t\t\t\t\t\t\t + list(map(lambda a: \"task_\"+a, task_attributes)) \\\n\t \t\t\t\t\t\t\t\t\t\t\t + list(map(lambda a: \"ram_\"+a, ram_attributes)) \\\n\t \t\t\t\t\t\t\t\t\t\t\t + list(map(lambda a: \"swap_\"+a, swap_attributes))\n\n\t# the csv column labels\n\theader = ['run_goup', 'run', 'transformation', 'mainjob_started', \"duration\"] + file_attributes + usage_attributes + machine_attributes_headers + ['out_size_kb', 'total_time_s', 'peak_memory_kb']\n#\theader = ['workflow','transformation', 'mainjob_started'] + file_attributes + usage_attributes + machine_attributes_headers + ['out_size_kb', 'total_time_s', 'peak_memory_kb']\n\n\twith open(out_file, 'w', newline='') as csvfile:\n\n\t\tspamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n\t\tspamwriter.writerow(header)\n\n\t\tfor job_info in [job_information[job_id] for job_id in invocations]:\n\n\t\t\tfile_sizes = [float(file['size']) for file in job_info['input_files']]\n\t\t\tusage_values = [float(job_info['usage'][attr]) for attr in usage_attributes]\n#\n\t\t\ttry:\n\t\t\t\tout_size = sum([float(file['size']) for file in job_info['output_files']])\n\t\t\texcept KeyError as k:\n\t\t\t\tout_size = null_string\n#\n\t\t\tpeak_mem = float(job_info['usage']['maxrss'])\n\t\t\tmachine_values = []\n\n\t\t\tfor machine_attrs, attrs in [(\"load\", load_attributes), (\"procs\", procs_attributes), (\"task\", task_attributes), (\"ram\", ram_attributes), (\"swap\", swap_attributes)]:\n\t\t\t\tfor attr in attrs:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tmachine_values.append(job_info[machine_attrs][attr])\n\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\tmachine_values.append(null_string)\n\n#\t\t\tdata = [job_info[\"workflow\"], job_info[\"transformation\"], job_info['mainjob_started_ts']] + file_sizes + usage_values + machine_values + [out_size, job_info['total_time'], peak_mem]\n\t\t\tdata = [job_information[\"run_group\"], job_information[\"run\"], job_info[\"transformation\"], job_info['mainjob_started_ts'], job_info[\"mainjob_duration\"]] + [job_info['host_name']] + [sum(file_sizes)] + usage_values + machine_values + [out_size, job_info['total_time'], peak_mem]\n#\t\t\tdata = [job_info[\"transformation\"], job_info['mainjob_started_ts']] + file_sizes + usage_values + machine_values + [out_size, job_info['total_time'], peak_mem]\n\t\t\tspamwriter.writerow(data)" ]
[ "0.6778147", "0.66215456", "0.6587748", "0.6419415", "0.6071375", "0.60661834", "0.6063572", "0.6040226", "0.60079324", "0.59419805", "0.5918246", "0.5916295", "0.5871361", "0.58706796", "0.58664465", "0.5850558", "0.5849682", "0.5848059", "0.5840836", "0.58217335", "0.57717675", "0.57646155", "0.57580245", "0.5730074", "0.5689477", "0.5650869", "0.5631725", "0.562486", "0.5623761", "0.5615128" ]
0.6656623
1
Delete a log file.
def delete_log(file_path): if os.path.exists(file_path): print('Deleting log %s...' % file_path) os.remove(file_path) else: raise ValueError("File %r doesn't exists - cannot delete." % file_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_log():\n log_path = Path.cwd() / \"premise.log\"\n if log_path.exists():\n log_path.unlink()", "def delete_file(fileName):\n os.remove(fileName)\n print (\"Deleteing file: \" + str(fileName))\n write_log()\n read_log()", "def delete_log(filename):\n log_directory = os.path.dirname(os.path.abspath(__file__)) + LOG_FOLDER\n response_code = 400\n response = \"\"\n if filename in os.listdir(log_directory):\n try:\n os.remove(os.path.join(log_directory, filename))\n response = f\"File {filename} was successfully deleted.\"\n response_code = 200\n except IsADirectoryError:\n response = f\"{filename} exists, but is a directory and not a file. Deletion failed.\"\n else:\n response = f\"File {filename} does not exist and so couldn't be deleted.\"\n return make_response(jsonify({'message': response}), response_code)", "def delete_log(self):\n os.system('rm -rf *.log')\n os.system('rm -rf *.log~')\n os.system('rm -rf *.last')\n os.system('rm -rf *.last~')", "def deleteGmlLoaderLogFile(logFile, command, logger):\n \n if os.path.isfile(logFile) == True:\n reader = open(logFile)\n \n for line in reader:\n if re.search(\"TransactionHandler - Rollback transaction\", line) != None:\n logger.error(\"TransactionHandler - Rollback transaction for \" + command)\n \n reader.close()\n message = \"Delete \" + logFile + \" \" + str(time.strftime(\"%d.%m.%Y %H:%M:%S\", time.gmtime(os.path.getmtime(logFile)))) + \" \" + str(os.path.getsize(logFile)) + \" bytes\"\n logger.info(message)\n os.remove(logFile)", "def delete(self, filename):\n pass", "def delete_record_file(self, record_file, logStat):\n result = self.storage_delete_file(record_file.group, record_file.storage)\n if result:\n logStat(deleted=True, file_obj=record_file)\n record_file.delete()\n return result", "def delete_file(self, lfile):\n raise NotImplementedError('delete_file')", "def delete_file(filename):\n\tprint client.file_delete(filename)", "def delete(self):\n\t\t#self.log.info(\"Deleting file {}\".format(self._filepath))\n\t\tos.remove(self._filepath)", "def delete_logs(self):\n if self.etw_log is not None:\n files = sorted(glob.glob(self.etw_log + '*'))\n for path in files:\n try:\n os.remove(path)\n except Exception:\n pass", "def delete_file(path):\n if os.path.isfile(path):\n os.remove(path)", "def _clear_log(log_path):\n\n\twith logging._lock:\n\t\twith open(log_path, 'w'):\n\t\t\tpass", "def delete_file(mapper, connection, target):\n if target.filename and app.config['CLEANUP_FILES']:\n try:\n os.remove(join(app.config['FILE_PATH'], str(target.talk.id),\n str(target.version), target.filename))\n except OSError:\n # We don't care if wasn't deleted because it does not exist\n pass", "def delete(self, filename, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n\n try:\n os.remove(file_path)\n except OSError:\n pass", "def delete_file(path):\n return files.delete_file(path)", "def delete_file(path):\n if os.path.isfile(path):\n os.remove(path)", "def delete(self):\n\n try:\n remove(self.file)\n except OSError:\n pass", "def delete_file(self, filepath):\n self.ftp.delete(filepath)", "def _delete_file(path):\n if os.path.isfile(path):\n os.remove(path)", "def _delete_file(path):\n if os.path.isfile(path):\n os.remove(path)", "def _delete_file(path):\n if os.path.isfile(path):\n os.remove(path)", "def delete(self, filename):\n raise NotImplementedError", "def delete_file(self):\n if (not self.exists()):\n raise IOError(\"File at '{}' does not exist.\".format(self.location))\n os.remove(self.location)", "def 
delete_file(file: str) -> None:\n\tuux.show_info(\"Deleting \" + file)\n\n\tif not os.path.exists(file):\n\t\t# Files does not exist\n\t\treturn\n\n\tos.remove(file)", "def delete(self):\n if os.path.exists(self.file_path):\n os.remove(self.file_path)", "def delete(self, file_id: str):\n file_path = self._path_to_file(file_id)\n os.remove(file_path)\n del self.index[file_id]", "def safe_delete(self, filename):\n try:\n os.remove(filename)\n except OSError:\n pass", "def delete( self ):\n if os.path.exists(self.filename):\n os.remove(self.filename)", "def delete_file(input_fn):\r\n if os.path.isfile(input_fn):\r\n os.remove(input_fn)" ]
[ "0.77605826", "0.7727856", "0.73945826", "0.7313134", "0.72794944", "0.7045915", "0.7009386", "0.6897459", "0.6797494", "0.6758738", "0.6743915", "0.6732991", "0.671415", "0.66890925", "0.6686626", "0.6566584", "0.6561632", "0.6561151", "0.65555596", "0.6545941", "0.6545941", "0.6531315", "0.65161645", "0.65148914", "0.6387", "0.63710403", "0.6344273", "0.6332207", "0.62937456", "0.6291752" ]
0.8215973
0
returns the value in degrees Fahrenheit converted to Celsius
def toCelsius(farenheit): return (farenheit - 32)*5 / 9
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_f_to_c(temp_in_farenheit): ## ##\n celsiustemp = round((temp_in_farenheit - 32) * 5/9, 1) ##\n return celsiustemp ##", "def convert_f_to_c(temp_in_farenheit):\n celcius_temp = round(float((temp_in_farenheit) - 32)*(5/9),1)\n return(celcius_temp)", "def convert_f_to_c(temp_in_farenheit):\n temp_in_celcius = ((temp_in_farenheit - 32) * 5) / 9\n temp_in_celcius = round(temp_in_celcius, 1)\n return temp_in_celcius", "def convert_f_to_c(temp_in_farenheit):\n cel = round((((temp_in_farenheit - 32) * 5) / 9),1)\n return cel", "def fahr_to_celsius(temp):\n tempInCel = (temp - 32) * 5/9\n return tempInCel", "def convert_f_to_c(temp_in_farenheit):\n \n temp=round((float(temp_in_farenheit)-32)*5/9,1)\n \n return (temp)", "def temperature() -> float:", "def fahrenheit_to_celsius():\n fahrenheit = ent_temperature.get()\n celsius = (5 / 9) * (float(fahrenheit) - 32)\n lbl_result[\"text\"] = f\"{round(celsius, 2)} \\N{DEGREE CELSIUS}\"", "def fahrenheit_to_celsius(temp):\n return (temp - 32) * 5/9", "def fahr_to_celcius(temp_fahr):\n temp_celcius = (temp_fahr - 32) * 5/9\n return temp_celcius", "def celsius(fahrenheit):\n return 5 / 9 * (fahrenheit - 32)", "def tempConvert(temp, unit):\n if unit == 'F':\n celsius = (temp - 32) * 5 / 9\n return celsius\n else:\n return temp", "def fahrenheit_to_celsius(fahrenheit):\n offset = 32\n multiplier = 5 / 9\n celsius = (fahrenheit - offset) * multiplier\n print(\"inside function:\", fahrenheit, offset, multiplier, celsius)\n return celsius", "def fahrenheit_to_celsius(fahrenheit):\n offset = 32\n multiplier = 5 / 9\n celsius = (fahrenheit - offset) * multiplier\n print(\"inside function:\", fahrenheit, offset, multiplier, celsius)\n return celsius", "def celsius_to_fahr(temp):\n return temp * (9/5) + 32", "def fahrenheit(celsius):\n return 9 / 5 * celsius + 32", "def GetFahrenheit(self):\n return self.GetCelcius()*1.8+32", "def cels_fahr(cels):\n temp = cels * 9.0 / 5 + 32\n return temp", "async def c(self, f : float):\n c = (f-32) * 5/9\n await self.bot.say(\"{0} Celsius\".format(c))", "def convert_to_celsius(fahrenheit):\n return (fahrenheit - 32) * 5 / 9", "def toCelcius (x):\r\n\r\n\tc = x-32\r\n\tc = 5*c/9\r\n\treturn c", "def translate_from_farenheit_to_celsius(farenheit: float) -> float:\n return (farenheit - 32) * 5./9.", "def fahr_to_celsius(fahr):\n result_in_celsius = (fahr - 32) + 5/9\n return result_in_celsius", "def celsius_conv(self, f):\n if f == 0:\n return -17.7778\n else:\n return (f - 32.0) * (5.0 / 9.0)", "def cels_to_fahr():\n while True:\n celsius = input(\"Podaj temperaturę w stopniach Celsjusza: \")\n try:\n int(celsius)\n break\n except ValueError:\n try:\n float(celsius)\n break\n except ValueError:\n print(\"Nieprawidłowe dane, podaj temperaturę jako wartość liczbową.\")\n print('''Wzór na przeliczanie stopni Celsjusza na stopnie Fahrenheita:\n [\\u00b0F] = [\\u00b0C] * 9/5 + 32''')\n print(\"Podana temperatura przeliczona na stopnie Fahnrenheita: \", end=\"\")\n print(float(celsius) * 9 / 5 + 32)", "def f2c_qa_function():\n F = float(input(\"Provide a Fahrenheit temperature in degrees: \"))\n C = 5/9.0*F - 32\n print(\"The temperatire in Celcius is {:g}\".format(C))", "def kelvin_to_celsius(temp):\n return temp - 273.15", "def fahrenheit(celsius):\n return ((celsius/5)*9)+32", "def convert_to_celsius(self):\n try:\n self.root.ids.fahrenheit_input.hint_text = 'Enter amount in Fahrenheit'\n self.root.ids.celsius_input.text = '{:.2f}'.format((float(self.root.ids.fahrenheit_input.text) - 32)\n * 5 / 9)\n except 
ValueError:\n self.root.ids.fahrenheit_input.text = ''\n self.root.ids.fahrenheit_input.hint_text = 'Invalid number'", "def fahrenheitToCelcius(fahrenheit:float, ndigits = 2)->float:\n return round((float(fahrenheit) - 32) * 5 / 9, ndigits)" ]
[ "0.8010471", "0.7904015", "0.7749893", "0.7748052", "0.77476305", "0.77396774", "0.76843536", "0.7657182", "0.76182777", "0.7557405", "0.74652946", "0.72664905", "0.7263454", "0.7263454", "0.7261374", "0.72476894", "0.7239142", "0.72153395", "0.7209384", "0.7199643", "0.7175355", "0.7162034", "0.7154658", "0.7147282", "0.71151745", "0.70987517", "0.7053965", "0.70083064", "0.700558", "0.6982957" ]
0.8065382
0
Return github API URL as string
def get_api_url(self): url = 'https://api.{}/repos/{}/{}/git/'.format(HOST_GITHUB, \ self.repo, self.product) return url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def github_url(self):\n return self.github.replace('.git', '')", "def github_link(self):\n if self.test_type == TestType.commit:\n test_type = 'commit'\n test_id = self.commit\n else:\n test_type = 'pull'\n test_id = self.pr_nr\n\n return f\"{self.fork.github_url}/{test_type}/{test_id}\"", "def repo_link(repo):\n return \"https://github.com/\" + repo", "async def _api_url(self) -> URL:\n return await self._gitlab_api_url(\"\")", "def url(self):\n\n return maybe_string(C.git_remote_url(self._remote))", "def github_name(self):\n return self.github_url.replace(\"https://github.com/\", '')", "def get_github_url(package_name: str, user_name: str):\n # Will keep ssh version for reference.\n # '%s @ git+ssh://[email protected]/draustin/%s.git'%(name, name)\n return '%s @ git+https://github.com/%s/%s.git'%(package_name, user_name, package_name)", "def format_url(self, data):\n git_url = urlparse(data[\"git_url\"])\n\n url = \"oauth2:{0}@{1}\".format(data[\"token\"], git_url.netloc)\n return git_url._replace(netloc=url).geturl()", "def create_link(repository, project_name):\n beginning_url = \"https://api.github.com/repos/\"\n separator_url = \"/\"\n end_url = \"/commits\"\n\n base_url = beginning_url+repository+separator_url+project_name+end_url\n return base_url", "def get_api_url() -> str:\n\n site = pywikibot.Site()\n url = site.protocol() + \"://\" + site.hostname() + site.apipath()\n return url", "def getProjectURL():", "def get_api_url() -> str:\n\n\tsite = pywikibot.Site()\n\turl = site.protocol() + \"://\" + site.hostname() + site.apipath()\n\treturn url", "def build_github_homepage_url(purl):\n purl_data = PackageURL.from_string(purl)\n\n namespace = purl_data.namespace\n name = purl_data.name\n version = purl_data.version\n subpath = purl_data.subpath\n\n if not (name and namespace):\n return\n\n url = \"https://github.com/{namespace}/{name}\".format(namespace=namespace, name=name)\n\n if version:\n url = \"{url}/tree/{version}\".format(url=url, version=version)\n\n if subpath:\n url = \"{url}/{subpath}\".format(url=url, subpath=subpath)\n\n return url", "def repo_url(self):\n return self._repo_url", "def get_url(self):\n return (\n \"https://raw.githubusercontent.com\"\n \"/benoitbryon/django-downloadview\"\n \"/b7f660c5e3f37d918b106b02c5af7a887acc0111\"\n \"/demo/demoproject/download/fixtures/hello-world.txt\"\n )", "def api_url(self, command: str) -> str:\n base_url = self.base_url\n path = \"/\".join(x for x in f\"{base_url.path}/api/v2\".split(\"/\") if x != \"\")\n return URL.build(\n scheme=base_url.scheme,\n host=base_url.host,\n port=base_url.port,\n path=f\"/{path}\",\n query={\"apikey\": self.api_token, \"cmd\": command},\n ).human_repr()", "def git_url():\n return \"https://github.com/tisnik/victimsdb-sample-data.git\"", "def build_url(cls, config, namespace, name):\n return \"hxxp://mock.repo.url/\" + namespace + \"/\" + name + \".git\"", "def git_remote_url(self):\n return self._git_remote_url", "def api_url(self):\n return self.get_api_url()", "def _get_api_url(self):\n return \"%s/%s/\" % (settings.API_URL, settings.API_VERSION)", "def get_public_url(self, doc_id, branch='master'):\n name, path_frag = self.get_repo_and_path_fragment(doc_id)\n return 'https://raw.githubusercontent.com/OpenTreeOfLife/' + name + '/' + branch + '/' + path_frag", "def get_api_url(settings: Settings) -> str:\n return _get_control(settings) \\\n .get('provider', {}).get('arguments', {}) \\\n .get('api_url', '')", "def github_api(request):\n if not 
request.startswith('https://api.github.com/'):\n request = 'https://api.github.com/' + request\n d = time.time() - github_api.last_time\n if d < 1:\n time.sleep(1 - d) # wait at least one second between GitHub API calls\n key = os.getenv('GITHUB_API_KEY')\n req = Request(request)\n req.add_header('User-Agent', github_api.user_agent)\n if key is not None:\n req.add_header('Authorization', 'token %s' % key)\n content = ''\n try:\n response = urlopen(req)\n content = response.read().decode()\n except HTTPError as e:\n print(request)\n print(e.reason)\n print(e.info())\n raise(e)\n github_api.last_time = time.time()\n return json.loads(content)", "def query_repo_url(repo_name):\n return buildapi.query_repo_url(repo_name)", "def url(self, api_name):\n return \"https://%s/api/%s/%s/\" % (self.host, self.api_version, api_name)", "def api_url(url_base):\n return f\"{url_base}/api/v2\"", "def api_url(self):\n return self._api_url", "def get_repo_url(repo, access_protocol, github_login):\n prop = {\n 'https': repo.clone_url,\n 'ssh': repo.ssh_url\n }[access_protocol]\n if access_protocol == 'https' and github_login:\n # we were provided explicit github login. For ssh access it is\n # impossible to specify different login within ssh RI, but it is\n # possible to do so for https logins\n url = URL(prop)\n assert url.scheme in ('http', 'https')\n url.username = github_login\n prop = url.as_str()\n return prop", "def GetGerritFetchUrl(host):\n return 'https://%s/' % host" ]
[ "0.79492223", "0.72487265", "0.7181632", "0.7154561", "0.7106888", "0.6985907", "0.6885024", "0.68606704", "0.682998", "0.6809253", "0.6746705", "0.67292404", "0.6727469", "0.67142564", "0.6706738", "0.66876775", "0.66692704", "0.66497993", "0.6614878", "0.6613082", "0.6584799", "0.65657115", "0.6516051", "0.65140766", "0.64963007", "0.6494963", "0.6471926", "0.641741", "0.6413179", "0.6396774" ]
0.83484757
0
Get a specific tag's data from Github API.
def get_tag(self, sha): return self.get_url_data(self.api_url + 'tags/' + sha)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_tag(self, tag):\n resp = self.get(_u.build_uri(\"tag\", domain=self.domain),\n data={'tag': tag})\n return utils.handle_response(resp)", "def find_by_id(self, tag, params={}, **options):\n path = \"/tags/%s\" % (tag)\n return self.client.get(path, params, **options)", "def get(self, endpoint, params=None):\n res = requests.get(\"https://api.github.com/\" + endpoint,\n auth=requests.auth.HTTPBasicAuth(self.credentials['username'], self.credentials['token']),\n params=params)\n return res.json()", "def info(self, tag):\n\n url = \"https://api.instagram.com/v1/tags/{0}?access_token={1}\".format(tag, self.access_token)\n request = requests.get(url)\n print(request.headers)\n return request.json()", "def get_release(repo, tag=\"latest\", quiet=False) -> dict:\n api_url = f\"https://api.github.com/repos/{owner}/{repo}\"\n req_url = (\n f\"{api_url}/releases/latest\"\n if tag == \"latest\"\n else f\"{api_url}/releases/tags/{tag}\"\n )\n request = get_request(req_url)\n releases = None\n num_tries = 0\n\n while True:\n num_tries += 1\n try:\n with urllib.request.urlopen(request, timeout=10) as resp:\n result = resp.read()\n remaining = int(resp.headers[\"x-ratelimit-remaining\"])\n if remaining <= 10:\n warnings.warn(\n f\"Only {remaining} GitHub API requests remaining \"\n \"before rate-limiting\"\n )\n break\n except urllib.error.HTTPError as err:\n if err.code == 401 and os.environ.get(\"GITHUB_TOKEN\"):\n raise ValueError(\"GITHUB_TOKEN env is invalid\") from err\n elif err.code == 403 and \"rate limit exceeded\" in err.reason:\n raise ValueError(\n f\"use GITHUB_TOKEN env to bypass rate limit ({err})\"\n ) from err\n elif err.code == 404:\n if releases is None:\n releases = get_releases(repo, quiet)\n if tag not in releases:\n raise ValueError(\n f\"Release {tag} not found (choose from {', '.join(releases)})\"\n )\n elif err.code == 503 and num_tries < max_http_tries:\n # GitHub sometimes returns this error for valid URLs, so retry\n warnings.warn(f\"URL request {num_tries} did not work ({err})\")\n continue\n raise RuntimeError(f\"cannot retrieve data from {req_url}\") from err\n\n release = json.loads(result.decode())\n tag_name = release[\"tag_name\"]\n if not quiet:\n print(f\"fetched release {tag_name!r} info from {owner}/{repo}\")\n\n return release", "def _fetch_latest_config_tag():\n github_release_url = config()['github_release_url']\n if config()['github_token']:\n headers = {'Authorization': f\"token {config()['github_token']}\"}\n else:\n headers = {}\n try:\n resp = requests.get(url=github_release_url, headers=headers)\n except Exception as err:\n logging.error(f\"Unable to fetch indexer config from github: {err}\")\n # Ignore any error and continue; try the fetch again later\n return None\n if not resp.ok:\n logging.error(f\"Unable to fetch indexer config from github: {resp.text}\")\n return None\n data = resp.json()\n return data['tag_name']", "def latest_github_tag():\n release_tags_github_url = \"https://api.github.com/repos/rackerlabs/openstack-guest-agents-unix/tags\"\n release_tags_json = urllib2.urlopen(release_tags_github_url)\n release_tags_data = json.load(release_tags_json)\n return str(release_tags_data[0]['name'])[1:]", "def read_tag(\n *,\n db: Session = Depends(get_db),\n id: int,\n current_user: DBUser = Depends(get_current_active_user),\n):\n tag = crud.tag.get(db_session=db, id=id)\n if not tag:\n raise HTTPException(status_code=404, detail=\"Tag not found\")\n if not crud.user.is_superuser(current_user) and (tag.owner_id != current_user.id):\n raise 
HTTPException(status_code=400, detail=\"Not enough permissions\")\n return tag", "def get(self):\n res = SmartAPI.get_tags(self.args.field)\n self.finish(res)", "def get(self):\n res = SmartAPI.get_tags(self.args.field)\n self.finish(res)", "def test_get_tag(self):\n self.seed_static_data()\n params = {'id': 1, 'event_id': 1}\n response = self.app.get('/api/v1/tag', headers=self.user1_headers, data=params)\n data = json.loads(response.data)\n self.assertEqual(data['id'], 1)\n self.assertEqual(data['event_id'], 1)\n self.assertEqual(data['tag_type'], 'RESPONSE')\n self.assertDictEqual(data['name'], {\n 'en': 'English Tag 1 Event 1',\n 'fr': 'French Tag 1 Event 1'\n })\n self.assertDictEqual(data['description'], {\n 'en': 'English Tag 1 Event 1 Description',\n 'fr': 'French Tag 1 Event 1 Description'\n })", "def get(self, uuid):\n\n\t\treturn self._get(\"/tag/%s\" % base.getid(uuid), \"tag\")", "async def get_tag_command(self, ctx):\n await self.get_tag(ctx)", "def get(self, endpoint, page=1, params=None):\n url = 'https://api.github.com/%(endpoint)s?access_token=%(token)s&page=%(page)d' % {\n 'endpoint': endpoint,\n 'token': self.access_token,\n 'page': page,\n }\n if params is not None:\n url += '&' + urlencode(params)\n response = requests.get(url)\n\n # Produce specific error on 404. Generic HTTPError otherwise.\n if response.status_code == 404:\n raise ResourceNotFound\n response.raise_for_status()\n\n return response.json()", "def get(self, url_to_get=None): # pylint: disable=too-many-branches\n\n next_url = None\n if not url_to_get:\n url_to_get = self.url_to_get\n\n if self.etags and url_to_get in self.etags:\n self.headers[\"If-None-Match\"] = self.etags[url_to_get]\n\n req = get(url_to_get, headers=self.headers)\n\n if req.status_code == 200:\n data = req.json()\n repos = []\n\n if \"Etag\" in req.headers:\n self.etags[url_to_get] = req.headers[\"Etag\"]\n Helpers.Dict(self.etags).to_json(Settings.etags_file)\n\n if isinstance(data, list):\n repos.extend(data)\n else:\n raise Exception(\n \"Unable to understand GitHub API response for: '%s'.\" % url_to_get\n )\n\n if \"Link\" in req.headers:\n next_url = Helpers.Regex(\n req.headers[\"Link\"], self.regex_next_url, group=1, return_data=True\n ).match()\n\n if next_url:\n for element in self.get(url_to_get=next_url):\n if element[\"name\"] not in Settings.repo_to_ignore:\n yield element\n else:\n continue\n\n if repos:\n for element in repos:\n if element[\"name\"] not in Settings.repo_to_ignore:\n yield element\n else:\n continue\n\n elif req.status_code == 304:\n data = Helpers.Dict.from_json(\n Helpers.File(Settings.repositories_file).read()\n )\n\n for element in data:\n if element[\"name\"] not in Settings.repo_to_ignore:\n yield element\n else:\n continue\n elif req.status_code == 401:\n raise Exception(\"Bad GitHub credentials.\")\n else:\n raise Exception(\n \"Somethign went wrong while communicating with: '%s'.\" % url_to_get\n )", "def get_pull_request(project, num, github_api=3):\r\n if github_api==2 :\r\n url = \"http://github.com/api/v2/json/pulls/{project}/{num}\".format(project=project, num=num)\r\n elif github_api == 3:\r\n url = \"https://api.github.com/repos/{project}/pulls/{num}\".format(project=project, num=num)\r\n response = requests.get(url)\r\n response.raise_for_status()\r\n if github_api == 2 :\r\n return json.loads(response.text)['pull']\r\n return json.loads(response.text)", "def get_from_git(project, obj, params={}, verbose=0):\n\n url = \"%s%s/raw/%s\" % (GIT_URL, project, obj)\n return 
load_yaml(requester(url, params=params,\n headers={'Accept': 'application/json'},\n verbose=verbose).text)", "def get(self, hash_tag):\n request_args = get_current_request_args()\n\n scope = request_args.get('scope') or DEFAULT_HASH_TAG_FETCH_SCOPE\n if scope not in HASH_TAG_RETRIEVAL_SCOPES:\n raise BadRequest(\n '`scope` must be one of {}'.format(HASH_TAG_RETRIEVAL_SCOPES))\n\n hash_tag = HashTag.get_not_deleted(hash_tag=hash_tag)\n if hash_tag is None:\n raise ResourceNotFound('Hash tag not found')\n\n hash_tag_details = {\n 'meta': lambda x: {\n 'data': None,\n 'meta': None\n },\n 'posts': lambda y: {\n 'data': None,\n 'meta': None\n },\n 'followers': lambda z: {\n 'data': None,\n 'meta': None\n }\n }\n\n scoped_details = hash_tag_details[scope]()\n\n return api_success_response(**scoped_details)", "def run(docker_hub_client, args):\n resp = docker_hub_client.get_tags(args.orgname, args.reponame, args.page)\n if resp['code'] == 200:\n if resp['content']['count'] > 0:\n rows = []\n for repo in resp['content']['results']:\n formatted_date = ''\n if repo['last_updated']:\n formatted_date = dateutil.parser \\\n .parse(repo['last_updated'])\n formatted_date = formatted_date.strftime(\"%Y-%m-%d %H:%M\")\n # Convert full_size in bytes to KB\n size_in_kb = repo['full_size'] / 1024\n formatted_size = readableMemoryFormat(size_in_kb)\n rows.append([repo['name'], formatted_size, formatted_date])\n header = ['Name', 'Size', 'Last updated']\n print_result(args.format, rows, header, resp['content']['count'],\n args.page)\n else:\n print('Error fetching tags for: {0}/{1}'.\n format(args.orgname, args.reponame))", "def getTag(self, authenticationToken, guid):\r\n pass", "def get_tag_by_id(self,\r\n access_token,\r\n tag_id):\r\n\r\n # Prepare query URL\r\n _url_path = '/tags/{tag_id}'\r\n _url_path = APIHelper.append_url_with_template_parameters(_url_path, { \r\n 'tag_id': tag_id\r\n })\r\n _query_builder = Configuration.base_uri\r\n _query_builder += _url_path\r\n _query_parameters = {\r\n 'access_token': access_token\r\n }\r\n _query_builder = APIHelper.append_url_with_query_parameters(_query_builder,\r\n _query_parameters, Configuration.array_serialization)\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'accept': 'application/json'\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.get(_query_url, headers=_headers)\r\n CustomQueryAuth.apply(_request)\r\n _context = self.execute_request(_request)\r\n\r\n # Endpoint and global error handling using HTTP status codes.\r\n if _context.response.status_code == 0:\r\n raise APIException('Unexpected error.', _context)\r\n self.validate_response(_context)\r\n\r\n # Return appropriate type\r\n return APIHelper.json_deserialize(_context.response.raw_body, Tag.from_dictionary)", "def github_api(request):\n if not request.startswith('https://api.github.com/'):\n request = 'https://api.github.com/' + request\n d = time.time() - github_api.last_time\n if d < 1:\n time.sleep(1 - d) # wait at least one second between GitHub API calls\n key = os.getenv('GITHUB_API_KEY')\n req = Request(request)\n req.add_header('User-Agent', github_api.user_agent)\n if key is not None:\n req.add_header('Authorization', 'token %s' % key)\n content = ''\n try:\n response = urlopen(req)\n content = response.read().decode()\n except HTTPError as e:\n print(request)\n print(e.reason)\n print(e.info())\n raise(e)\n github_api.last_time = time.time()\n return json.loads(content)", "def search(self, tag):\n\n url = 
\"https://api.instagram.com/v1/tags/search?q={0}&access_token={1}\".format(tag, self.access_token)\n request = requests.get(url)\n return request.json()", "def api_scrape_url():\n if 'working_repo' in session:\n meta_data = get_tags(request.args['url'])\n return jsonify(msg=\"success\", data=meta_data)\n else:\n return jsonify(msg=\"failure, unauthorized\"), 401", "def get_repo_data(repo, session=None):\n url = f'{GITHUB_API_URL}/repos/{repo}'\n return get_whole_response_as_json(url, session)", "def _get_repo(self, owner, repo):\n url = f\"{BASE_URL}/repos/{owner}/{repo}\"\n status, data, _ = self.get(url)\n if (status == 200):\n return data\n else:\n log.warning(\"GHUB\", f\"Unexpected status code {status} for request {url}.\")", "def get_tag(self, tag, filename):\n return self.get_tag_batch(tag, [filename])[0]", "def fetch(self, tag):\n return fetch_image(self.collection.client, tag)", "def instagramrequest(tag_name, max_tag_id=0):\n request_string = '?client_id=b865ec47b91346f3a2cbcfe04a6a80d9'\n if max_tag_id:\n request_string += '&max_tag_id='+str(max_tag_id)\n response = urlopen('https://api.instagram.com/v1/tags/'+tag_name+'/media/recent'+request_string)\n content = response.readall()\n return json.loads(content.decode(encoding='utf-8', errors='ignore'))", "def pull(self, repo, tag):\n check_blacklist(repo)\n logger.info(\"Pulling Docker image {}:{}\".format(repo, tag))\n with SimpleFlock(self.FLOCKFILE, timeout=1200):\n stream = self.client.pull(repo, tag=tag, stream=True, insecure_registry=True)\n log_output(stream)" ]
[ "0.67847025", "0.6460267", "0.63765323", "0.6361611", "0.6254757", "0.6193532", "0.6165435", "0.5924588", "0.5913108", "0.5913108", "0.5889115", "0.58397275", "0.5831963", "0.58196306", "0.581669", "0.57963234", "0.57881856", "0.57807654", "0.57554406", "0.57134587", "0.56990176", "0.5679682", "0.56588626", "0.5614263", "0.5614167", "0.55856615", "0.5577077", "0.55760366", "0.55580515", "0.5551342" ]
0.65368634
1
Github API can only return all tags, but we only want the latest.
def get_latest_tags(self): start = len(self.tags) - self.num_comparisons tags = self.tags latest = [] for i in xrange(len(tags)): if i >= start: parts = tags[i]['ref'].split('/') release_num = parts[2] sha = tags[i]['object']['sha'] tag = [release_num, sha] latest.append(tag) return latest
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def latest_github_tag():\n release_tags_github_url = \"https://api.github.com/repos/rackerlabs/openstack-guest-agents-unix/tags\"\n release_tags_json = urllib2.urlopen(release_tags_github_url)\n release_tags_data = json.load(release_tags_json)\n return str(release_tags_data[0]['name'])[1:]", "def do_latest_tag(args, image_name_tag, image_name):\n if args.latest is True:\n if tag(image_name_tag, image_name+':latest'):\n push(args, image_name+':latest')", "def _fetch_latest_config_tag():\n github_release_url = config()['github_release_url']\n if config()['github_token']:\n headers = {'Authorization': f\"token {config()['github_token']}\"}\n else:\n headers = {}\n try:\n resp = requests.get(url=github_release_url, headers=headers)\n except Exception as err:\n logging.error(f\"Unable to fetch indexer config from github: {err}\")\n # Ignore any error and continue; try the fetch again later\n return None\n if not resp.ok:\n logging.error(f\"Unable to fetch indexer config from github: {resp.text}\")\n return None\n data = resp.json()\n return data['tag_name']", "def run(docker_hub_client, args):\n resp = docker_hub_client.get_tags(args.orgname, args.reponame, args.page)\n if resp['code'] == 200:\n if resp['content']['count'] > 0:\n rows = []\n for repo in resp['content']['results']:\n formatted_date = ''\n if repo['last_updated']:\n formatted_date = dateutil.parser \\\n .parse(repo['last_updated'])\n formatted_date = formatted_date.strftime(\"%Y-%m-%d %H:%M\")\n # Convert full_size in bytes to KB\n size_in_kb = repo['full_size'] / 1024\n formatted_size = readableMemoryFormat(size_in_kb)\n rows.append([repo['name'], formatted_size, formatted_date])\n header = ['Name', 'Size', 'Last updated']\n print_result(args.format, rows, header, resp['content']['count'],\n args.page)\n else:\n print('Error fetching tags for: {0}/{1}'.\n format(args.orgname, args.reponame))", "def get_tags_and_dates(repository_name):\n tags_query = \"SELECT t.name, c.commit_author_date \" \\\n \"FROM github_commit c, release_tag t \" \\\n \"where t.commit_url = c.url and t.repository=?\"\n return dbutils.execute_query(tags_query, (repository_name,), DATABASE_FILE)", "def get_last_tag_by_date(directory=None):\n cmd = \"git for-each-ref --sort='*authordate' \" \\\n \"--format='%(refname:short)' refs/tags/upstream\"\n output = check_output(cmd, shell=True, cwd=directory, stderr=PIPE)\n output = output.splitlines()\n if len(output) == 0:\n return ''\n return output[-1]", "def get_last_tag_by_version(directory=None):\n cmd = \"git for-each-ref --sort='*authordate' \" \\\n \"--format='%(refname:short)' refs/tags/upstream\"\n output = check_output(cmd, shell=True, cwd=directory, stderr=PIPE)\n tags = []\n versions = []\n for line in output.splitlines():\n tags.append(line.strip())\n ver = re.match(r\"[0-9]+\\.[0-9]+\\.[0-9]+\", line)\n if ver:\n versions.append(ver)\n return tags[versions.index(max(versions))] if versions else ''", "def get_latest_tag(self, repo: git.Repo) -> Tuple[Optional[\n git.refs.tag.TagReference], Optional[semantic_version.Version]]:\n raw_tag = self._search_strategy(\n repo=repo, branch=self._branch)\n if raw_tag is None:\n return None, None\n sem_tag = semantic_version.Version(\n tag_search_strategy.clean_tag_name(str(raw_tag)))\n return raw_tag, sem_tag", "def get_all_tags():\n try:\n tags = g.projects.distinct('tags')\n return jsonify(sorted(tags, key=str.lower))\n except Exception as err:\n raise ApiException(str(err), 500)", "def get_most_recent_tarball(self, pkg):\n pass", "def get_all_tags():\n 
try:\n data = ReadTag().run()\n except Exception as ex:\n return jsonify({'code': '500','message':'Internal server error'})\n else:\n return jsonify({'code': '200','data': data})", "def get_tags(self):\n return self.get_url_data(self.api_url + 'refs/tags')", "def get(self, url_to_get=None): # pylint: disable=too-many-branches\n\n next_url = None\n if not url_to_get:\n url_to_get = self.url_to_get\n\n if self.etags and url_to_get in self.etags:\n self.headers[\"If-None-Match\"] = self.etags[url_to_get]\n\n req = get(url_to_get, headers=self.headers)\n\n if req.status_code == 200:\n data = req.json()\n repos = []\n\n if \"Etag\" in req.headers:\n self.etags[url_to_get] = req.headers[\"Etag\"]\n Helpers.Dict(self.etags).to_json(Settings.etags_file)\n\n if isinstance(data, list):\n repos.extend(data)\n else:\n raise Exception(\n \"Unable to understand GitHub API response for: '%s'.\" % url_to_get\n )\n\n if \"Link\" in req.headers:\n next_url = Helpers.Regex(\n req.headers[\"Link\"], self.regex_next_url, group=1, return_data=True\n ).match()\n\n if next_url:\n for element in self.get(url_to_get=next_url):\n if element[\"name\"] not in Settings.repo_to_ignore:\n yield element\n else:\n continue\n\n if repos:\n for element in repos:\n if element[\"name\"] not in Settings.repo_to_ignore:\n yield element\n else:\n continue\n\n elif req.status_code == 304:\n data = Helpers.Dict.from_json(\n Helpers.File(Settings.repositories_file).read()\n )\n\n for element in data:\n if element[\"name\"] not in Settings.repo_to_ignore:\n yield element\n else:\n continue\n elif req.status_code == 401:\n raise Exception(\"Bad GitHub credentials.\")\n else:\n raise Exception(\n \"Somethign went wrong while communicating with: '%s'.\" % url_to_get\n )", "def get_latest_build(tag, package):\n proc = Popen([\"osg-koji\", \"-q\", \"list-tagged\", \"--latest\", tag, package],\n stdout=PIPE)\n out = proc.communicate()[0] or b''\n ret = proc.returncode\n\n latest_build_line = out.decode(\"latin-1\").strip()\n\n if ret != 0 or not latest_build_line:\n return\n\n return latest_build_line.split()[0]", "def _sort_latest_tag(self, versions: List[dict], tag_key: str) -> Dict:\n return next(\n iter(\n sorted(\n versions,\n reverse=True,\n key=lambda s: list(\n map(\n int,\n filter(None, re.sub(r\"[^0-9.]+\", \"\", s.get(tag_key), re.I).split(\".\")),\n )\n )\n if \".\" in s.get(tag_key)\n else [-1],\n )\n )\n )", "def tags(self):\r\n url = self.base_url + 'tags/'\r\n return json.loads(self.bb.load_url(url))", "def latest_tagged_video(tag):\n if not isinstance(tag, Tag):\n try:\n tag = Tag.objects.get(text=tag)\n except Tag.DoesNotExist:\n return mark_safe('')\n video = first_or_none(Video.objects.filter(tags=tag)\n .order_by('-issue__issue_date'))\n if video:\n return mark_safe(video.key)\n return mark_safe('')", "def get_release(repo, tag=\"latest\", quiet=False) -> dict:\n api_url = f\"https://api.github.com/repos/{owner}/{repo}\"\n req_url = (\n f\"{api_url}/releases/latest\"\n if tag == \"latest\"\n else f\"{api_url}/releases/tags/{tag}\"\n )\n request = get_request(req_url)\n releases = None\n num_tries = 0\n\n while True:\n num_tries += 1\n try:\n with urllib.request.urlopen(request, timeout=10) as resp:\n result = resp.read()\n remaining = int(resp.headers[\"x-ratelimit-remaining\"])\n if remaining <= 10:\n warnings.warn(\n f\"Only {remaining} GitHub API requests remaining \"\n \"before rate-limiting\"\n )\n break\n except urllib.error.HTTPError as err:\n if err.code == 401 and os.environ.get(\"GITHUB_TOKEN\"):\n raise 
ValueError(\"GITHUB_TOKEN env is invalid\") from err\n elif err.code == 403 and \"rate limit exceeded\" in err.reason:\n raise ValueError(\n f\"use GITHUB_TOKEN env to bypass rate limit ({err})\"\n ) from err\n elif err.code == 404:\n if releases is None:\n releases = get_releases(repo, quiet)\n if tag not in releases:\n raise ValueError(\n f\"Release {tag} not found (choose from {', '.join(releases)})\"\n )\n elif err.code == 503 and num_tries < max_http_tries:\n # GitHub sometimes returns this error for valid URLs, so retry\n warnings.warn(f\"URL request {num_tries} did not work ({err})\")\n continue\n raise RuntimeError(f\"cannot retrieve data from {req_url}\") from err\n\n release = json.loads(result.decode())\n tag_name = release[\"tag_name\"]\n if not quiet:\n print(f\"fetched release {tag_name!r} info from {owner}/{repo}\")\n\n return release", "def _fetch_latest_for_tag(self, tag, today):\n result = []\n url = Fetch163.search_link % urllib2.quote(tag.name.encode('utf8'))\n try:\n resp = urllib2.urlopen(url)\n except urllib2.URLError as e:\n urllib_error(e)\n else:\n doc = eval(resp.read())\n if doc and type(doc) is list:\n if today:\n news_today = self._today_filter(doc, delta=2)\n else:\n news_today = doc\n for d in news_today:\n docid = d.get('docid', '')\n #title = u'%s' % d.get('title', '')\n # the d.get('title') is a unicode string represent by\n # python str, so use unicode-escape to decode it.\n title = d.get('title', '')\n #print type(title)\n news_title = self._trans_title(title)\n if docid and title:\n news_exits = News.objects.filter(\n Q(docid=docid) | Q(title=news_title)\n )\n #print docid, news_title, news_exits\n intro, body, c_num, ptime, pic = self._fetch_news(docid)\n if not news_exits:\n print 'new news', news_title, docid\n news = News()\n news.docid = docid\n news.title = news_title\n news.content = body\n news.tag = tag\n news.comment_num = c_num\n news.list_pic = pic\n news.abstract = intro\n news.update_time = ptime\n news.save()\n import time\n time.sleep(2)\n if news:\n result.append(news)\n else:\n print 'update news', news_title\n n = news_exits[0]\n print 'old:', n.comment_num, 'new:', c_num\n n.comment_num = c_num\n n.save()\n else:\n print 'Fetch news for tag: %s, Error' % tag.name\n\n return result", "def tags(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'tags')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def test_none_return_if_all_excluded(self): # pylint: disable=invalid-name\n tags = [_TagInfo('1.0.1', 'commit1', ''),\n _TagInfo('notsemver', 'commit2', '')]\n self.assertEqual(_seek_last_semver_tag(tags, '1.0.1'), None)", "def get_latest_items(parser, token):\n bits = token.split_contents()\n\n if len(bits) != 4:\n raise TemplateSyntaxError, \"get_latest_item tag takes exactly three arguments\"\n if bits[2] != 'as':\n raise TemplateSyntaxError, \"second argument to get_latest_item tag must be 'as'\"\n return LatestItemNode(bits[1], bits[3])", "def get_repository_tags(repository_name):\n tags_query = \"SELECT * FROM release_tag where repository=?\"\n return dbutils.execute_query(tags_query, (repository_name,), DATABASE_FILE)", "def gettag(self):\n cmd = [\"git\", \"tag\"]\n p = Popen(cmd, cwd=self.filename, stdout=PIPE)\n data, res = p.communicate()\n return data.decode(\"utf-8\").split(\"\\n\")", "def __gitTagList(self):\n self.vcs.gitListTagBranch(self.project.getProjectPath(), True)", "def get_latest_posts(parser, token):\n\ttry:\n\t\ttag_name, arg = token.contents.split(None, 1)\n\texcept ValueError:\n\t\traise 
template.TemplateSyntaxError, \"%s tag requires arguments\" % token.contents.split()[0]\n\t\n\tm = re.search(r'(.*?) as (\\w+)', arg)\n\t\n\tif not m:\n\t\traise template.TemplateSyntaxError, \"%s tag had invalid arguments\" % tag_name\n\t\n\tformat_string, var_name = m.groups()\n\t\n\treturn LatestPosts(format_string[0], var_name)", "def get(self) -> Iterable[instarepo.github.Repo]:\n return self._filter_pushed_after(\n self._filter_pushed_before(\n self._filter_language(\n self._filter_prefix(\n self._filter_forks(\n self._filter_archived(\n self.github.get_all_repos(self.sort, self.direction)\n )\n )\n )\n )\n )\n )", "def latest_release_get():\n try:\n return json_response.success({'version': version.latest_version()})\n except version.Error as e:\n return json_response.error(str(e)), 200", "def get(self):\n res = SmartAPI.get_tags(self.args.field)\n self.finish(res)", "def get(self):\n res = SmartAPI.get_tags(self.args.field)\n self.finish(res)" ]
[ "0.7582535", "0.67998123", "0.65141267", "0.64969236", "0.6446542", "0.640628", "0.6393738", "0.6162547", "0.5983191", "0.5975903", "0.59552914", "0.5918902", "0.5868156", "0.58608156", "0.5853621", "0.58516884", "0.5850484", "0.5837113", "0.5833859", "0.5821743", "0.5812356", "0.5806186", "0.57818055", "0.57770026", "0.5776528", "0.5763899", "0.5745858", "0.5714461", "0.5693107", "0.5693107" ]
0.6986198
1
Return github tag release URL as string
def get_url_tag_release(self, release_num): url = 'https://{}/{}/{}/releases/tag/{}'.format( HOST_GITHUB, self.repo, self.product, release_num ) return url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def git_url(fp: str) -> str:\n return f\"https://github.com/pantsbuild/pants/blob/release_{PANTS_SEMVER}/{fp}\"", "def get_url_tag_commit(self, git_sha):\n\n url = 'https://{}/{}/{}/commit/{}'.format(\n HOST_GITHUB,\n self.repo,\n self.product,\n git_sha\n )\n return url", "def github_url(self):\n return self.github.replace('.git', '')", "def latest_github_tag():\n release_tags_github_url = \"https://api.github.com/repos/rackerlabs/openstack-guest-agents-unix/tags\"\n release_tags_json = urllib2.urlopen(release_tags_github_url)\n release_tags_data = json.load(release_tags_json)\n return str(release_tags_data[0]['name'])[1:]", "def repo_link(repo):\n return \"https://github.com/\" + repo", "def ticket_url_or_tag(tag: str) -> str:\n url = _url_if_url(get_url_from_tag, tag)\n return _value_with_url(tag, url) if url else tag", "def get_github_url(package_name: str, user_name: str):\n # Will keep ssh version for reference.\n # '%s @ git+ssh://[email protected]/draustin/%s.git'%(name, name)\n return '%s @ git+https://github.com/%s/%s.git'%(package_name, user_name, package_name)", "def github_link(self):\n if self.test_type == TestType.commit:\n test_type = 'commit'\n test_id = self.commit\n else:\n test_type = 'pull'\n test_id = self.pr_nr\n\n return f\"{self.fork.github_url}/{test_type}/{test_id}\"", "def get_archive_url(url, branch='master', release=None):\n git_url = trim_repo_url(url)\n fragment = None\n file = git_url.split(\"/\")[-1]\n \n if release:\n fragment = \"/archive/{}.zip\".format(release)\n else:\n fragment = \"/archive/{}.zip\".format(branch)\n \n return file, git_url+fragment", "def get_public_url(self, doc_id, branch='master'):\n name, path_frag = self.get_repo_and_path_fragment(doc_id)\n return 'https://raw.githubusercontent.com/OpenTreeOfLife/' + name + '/' + branch + '/' + path_frag", "def get_api_url(self):\n\n url = 'https://api.{}/repos/{}/{}/git/'.format(HOST_GITHUB, \\\n self.repo, self.product)\n return url", "def url(self):\n\n return maybe_string(C.git_remote_url(self._remote))", "def get_github_library_version(name, url):\n while True:\n # For the release, make sure the default versions do not include \"-dev\"\n version = raw_input(\"Version of %s?: \" % name)\n if not url_exists(\"%s/releases/tag/%s\" % (url, version)):\n print_warning(\"The version of %s is not valid. 
Ensure you've chosen a correct value by checking the \"\n \"GitHub releases for exact naming at \"\n \"%s/releases before you continue.\" % (name, url))\n return version", "def repo_tag(self):\n return '%s/gcloud/%s' % (constants.APPENGINE_REGISTRY, self._tag)", "def pr_link(repo, id):\n\n return '[#{id}](https://github.com/{repo}/pull/{id})'.format(id=id, repo=repo)", "def git_url():\n return \"https://github.com/tisnik/victimsdb-sample-data.git\"", "def get_repo_url(package_name):\n package_info = get_package_info(package_name)\n\n if package_info and package_info.get('links'):\n links = package_info['links']\n\n if links.get('repository'):\n return links['repository']", "def _fetch_latest_config_tag():\n github_release_url = config()['github_release_url']\n if config()['github_token']:\n headers = {'Authorization': f\"token {config()['github_token']}\"}\n else:\n headers = {}\n try:\n resp = requests.get(url=github_release_url, headers=headers)\n except Exception as err:\n logging.error(f\"Unable to fetch indexer config from github: {err}\")\n # Ignore any error and continue; try the fetch again later\n return None\n if not resp.ok:\n logging.error(f\"Unable to fetch indexer config from github: {resp.text}\")\n return None\n data = resp.json()\n return data['tag_name']", "def svn_url(svninfo=None):\n if svninfo is None:\n svninfo = svn_info()\n return svninfo.find('entry/url').text", "def svnurl(self):\r\n info = self.info()\r\n return py.path.svnurl(info.url)", "def svn_info_t_URL_get(svn_info_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def repo_url(self):\n return self._repo_url", "def build_github_homepage_url(purl):\n purl_data = PackageURL.from_string(purl)\n\n namespace = purl_data.namespace\n name = purl_data.name\n version = purl_data.version\n subpath = purl_data.subpath\n\n if not (name and namespace):\n return\n\n url = \"https://github.com/{namespace}/{name}\".format(namespace=namespace, name=name)\n\n if version:\n url = \"{url}/tree/{version}\".format(url=url, version=version)\n\n if subpath:\n url = \"{url}/{subpath}\".format(url=url, subpath=subpath)\n\n return url", "def get_url(self):\n return (\n \"https://raw.githubusercontent.com\"\n \"/benoitbryon/django-downloadview\"\n \"/b7f660c5e3f37d918b106b02c5af7a887acc0111\"\n \"/demo/demoproject/download/fixtures/hello-world.txt\"\n )", "def get_tag(repo: str = None) -> str:\n if not repo:\n repo = '.'\n repo=repo.replace('\\\\','/')\n cmd = ['git', 'describe']\n result = _run_git(cmd, repo=repo, expect_stderr=True)\n if not result:\n return None\n tag: str = result.stdout.decode(\"utf-8\")\n tag = tag.replace('\\r', '').replace('\\n', '')\n return tag", "def github_name(self):\n return self.github_url.replace(\"https://github.com/\", '')", "def tag_release():\n # We're assuming that setup.py has already been updated\n # manually or using scripts/release/bump-version so the\n # current version in setup.py is the version number we should tag.\n version_number = get_current_version_number()\n click.echo(\"Tagging %s release\" % version_number)\n subprocess.check_call(\n ['git', 'tag', '-a', version_number,\n '-m', 'Tagging %s release' % version_number],\n )", "def create_link(repository, project_name):\n beginning_url = \"https://api.github.com/repos/\"\n separator_url = \"/\"\n end_url = \"/commits\"\n\n base_url = beginning_url+repository+separator_url+project_name+end_url\n return base_url", "def build_url(cls, config, namespace, name):\n return \"hxxp://mock.repo.url/\" + namespace + \"/\" + 
name + \".git\"", "def get_last_release_id():\n url = \"https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest\"\n try:\n with urlopen(url, timeout=10) as resp:\n return json.loads(resp.read().decode(\"utf-8\")).get(\"tag_name\", \"0\")\n except URLError as e:\n log(f\"YouTubeDLHelper error [get last release id]: {e}\")" ]
[ "0.7346837", "0.7119125", "0.6906928", "0.6849196", "0.6814661", "0.6633057", "0.6601647", "0.65425444", "0.65085566", "0.6498361", "0.6480949", "0.6473229", "0.63616604", "0.63453585", "0.63183016", "0.62817633", "0.62013495", "0.6147767", "0.61303365", "0.61104536", "0.6100949", "0.60963523", "0.6080163", "0.6078019", "0.6056599", "0.6055186", "0.6023433", "0.6011335", "0.6003082", "0.5991574" ]
0.7984513
0
Return github tag commit SHA URL as string
def get_url_tag_commit(self, git_sha): url = 'https://{}/{}/{}/commit/{}'.format( HOST_GITHUB, self.repo, self.product, git_sha ) return url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def git_url(fp: str) -> str:\n return f\"https://github.com/pantsbuild/pants/blob/release_{PANTS_SEMVER}/{fp}\"", "def github_url(self):\n return self.github.replace('.git', '')", "def repo_link(repo):\n return \"https://github.com/\" + repo", "def url_for(self: Self, commit_sha: str, path: str, lnum: int | None = None) -> str:\n # Default to main branch\n url = f\"https://github.com/{self.org}/{self.repo}/blob/{commit_sha}/{path}\"\n if lnum:\n url += f\"#L{lnum}\"\n return url", "def get_github_url(package_name: str, user_name: str):\n # Will keep ssh version for reference.\n # '%s @ git+ssh://[email protected]/draustin/%s.git'%(name, name)\n return '%s @ git+https://github.com/%s/%s.git'%(package_name, user_name, package_name)", "def github_svn_rev2hash(tag: str, rev): # pragma: no cover\n uri = f'https://github.com/wikimedia/{tag}/!svn/vcc/default'\n request = fetch(uri, method='PROPFIND',\n data=\"<?xml version='1.0' encoding='utf-8'?>\"\n '<propfind xmlns=\\\"DAV:\\\"><allprop/></propfind>',\n headers={'label': str(rev),\n 'user-agent': 'SVN/1.7.5 {pwb}'})\n dom = xml.dom.minidom.parse(BytesIO(request.content))\n hsh = dom.getElementsByTagName('C:git-commit')[0].firstChild.nodeValue\n date = dom.getElementsByTagName('S:date')[0].firstChild.nodeValue\n date = time.strptime(date[:19], '%Y-%m-%dT%H:%M:%S')\n return hsh, date", "def latest_github_tag():\n release_tags_github_url = \"https://api.github.com/repos/rackerlabs/openstack-guest-agents-unix/tags\"\n release_tags_json = urllib2.urlopen(release_tags_github_url)\n release_tags_data = json.load(release_tags_json)\n return str(release_tags_data[0]['name'])[1:]", "def github_link(self):\n if self.test_type == TestType.commit:\n test_type = 'commit'\n test_id = self.commit\n else:\n test_type = 'pull'\n test_id = self.pr_nr\n\n return f\"{self.fork.github_url}/{test_type}/{test_id}\"", "def cmd_get_sha(ref):\n return ['git', 'rev-parse', ref]", "def get_url_tag_release(self, release_num):\n\n url = 'https://{}/{}/{}/releases/tag/{}'.format(\n HOST_GITHUB,\n self.repo,\n self.product,\n release_num\n )\n return url", "def get_version(git_repo, commit):\n version = git_repo.rev_parse(commit, short=7)\n try:\n version = \"%s@%s\" % (git_repo.find_tag(commit), version)\n except GitRepositoryError:\n pass\n\n return version", "def url(self):\n\n return maybe_string(C.git_remote_url(self._remote))", "def get_public_url(self, doc_id, branch='master'):\n name, path_frag = self.get_repo_and_path_fragment(doc_id)\n return 'https://raw.githubusercontent.com/OpenTreeOfLife/' + name + '/' + branch + '/' + path_frag", "def get_tag(repo: str = None) -> str:\n if not repo:\n repo = '.'\n repo=repo.replace('\\\\','/')\n cmd = ['git', 'describe']\n result = _run_git(cmd, repo=repo, expect_stderr=True)\n if not result:\n return None\n tag: str = result.stdout.decode(\"utf-8\")\n tag = tag.replace('\\r', '').replace('\\n', '')\n return tag", "def get_hash(repo, ref='HEAD'):\n return subprocess.check_output(['git', 'rev-parse', '--verify', ref],\n cwd=repo).rstrip()", "def get_tag(self, sha):\n return self.get_url_data(self.api_url + 'tags/' + sha)", "def git_sha1_commit():\n return local('git rev-parse --short HEAD', capture=True)", "def sha(location, rev):\n ensure_dir(location)\n with utils.cd(location):\n cmd = '/usr/bin/git rev-parse --verify {}'.format(rev)\n return subprocess.check_output(cmd, shell=True).strip()", "def _tag_to_sha1(self):\n def get_sha1(url):\n # Ceph (and other projects) uses annotated tags for releases. 
This\n # has the side-effect of making git ls-remote return the sha1 for\n # the annotated tag object and not the last \"real\" commit in that\n # tag. By contrast, when a person (or a build system) issues a\n # \"git checkout <tag>\" command, HEAD will be the last \"real\" commit\n # and not the tag.\n # Below we have to append \"^{}\" to the tag value to work around\n # this in order to query for the sha1 that the build system uses.\n return repo_utils.ls_remote(url, \"%s^{}\" % self.tag)\n\n git_url = repo_utils.build_git_url(self.project)\n result = get_sha1(git_url)\n # For upgrade tests that are otherwise using ceph-ci.git, we need to\n # also look in ceph.git to lookup released tags.\n if result is None and 'ceph-ci' in git_url:\n alt_git_url = git_url.replace('ceph-ci', 'ceph')\n log.info(\n \"Tag '%s' not found in %s; will also look in %s\",\n self.tag,\n git_url,\n alt_git_url,\n )\n result = get_sha1(alt_git_url)\n\n if result is None:\n raise CommitNotFoundError(self.tag, git_url)\n return result", "def ticket_url_or_tag(tag: str) -> str:\n url = _url_if_url(get_url_from_tag, tag)\n return _value_with_url(tag, url) if url else tag", "def repo_tag(self):\n return '%s/gcloud/%s' % (constants.APPENGINE_REGISTRY, self._tag)", "def _format_to_link(self, commit):\n return os.path.join(self.mount, \"commits-by-hash\", self._hash_updir(commit), commit) + \"/\"", "def get_git_revision_hash():\n return subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip().decode('ascii')", "def push_url(self):\n\n return maybe_string(C.git_remote_pushurl(self._remote))", "def gitstr():\n try:\n return \"%s\" % (open('.git/refs/heads/master').read().strip()[0:10])\n except FileNotFoundError:\n return \"\"\n except IndexError:\n return \"\"", "def git_url():\n return \"https://github.com/tisnik/victimsdb-sample-data.git\"", "def get_commit():\n cmd = \"git rev-parse HEAD\"\n result = subprocess.run(shlex.split(cmd), stdout=subprocess.PIPE)\n return result.stdout.decode(\"utf-8\").strip()", "def get_commit_hash():\n git_dir = get_git_root()\n args = [\"git\", \"-C\", git_dir, \"rev-parse\", \"--short\", \"--verify\", \"HEAD\"]\n return subprocess.check_output(args).strip().decode()", "def pr_link(repo, id):\n\n return '[#{id}](https://github.com/{repo}/pull/{id})'.format(id=id, repo=repo)", "def github_name(self):\n return self.github_url.replace(\"https://github.com/\", '')" ]
[ "0.71973187", "0.69282585", "0.6792901", "0.6748358", "0.66634905", "0.6656634", "0.6605502", "0.6590671", "0.65770376", "0.6560338", "0.6532101", "0.65264523", "0.6505128", "0.6500491", "0.6499007", "0.64897436", "0.63229996", "0.6280397", "0.6262036", "0.6240915", "0.61932933", "0.6188964", "0.61703473", "0.61590856", "0.6153254", "0.6153118", "0.61395156", "0.61198765", "0.61079973", "0.60922277" ]
0.8073723
0
Parse CHANGELOG for latest tag.
def get_changelog(self, commit_sha): url = 'https://{}/{}/{}/' + commit_sha + '/CHANGELOG' url = url.format(HOST_GITHUB_RAW, self.repo, self.product) req = requests.get(url) lines = req.text first = self.latest_tags[self.num_comparisons - 1][VERS] last = self.latest_tags[self.num_comparisons - 2][VERS] flag = False log = '' for line in lines.splitlines(): if first in line: flag = True if last in line: flag = False if flag: log += line + '\n' return log
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_latest_update(self, resp: Dict[str, Any], latest_version: str) -> str:\n latest_release = resp.get(\"releases\", {}).get(latest_version)\n if latest_release is not None and isinstance(latest_release, list):\n release_artifact_dates = []\n for artifact in latest_release:\n try:\n upload_time = artifact.get(\"upload_time_iso_8601\")\n parsed_upload_time = dateutil.parser.isoparse(upload_time)\n release_artifact_dates.append(parsed_upload_time)\n except Exception:\n pass\n latest_artifact_timestamp = max(release_artifact_dates)\n return latest_artifact_timestamp.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n return \"\"", "def parse(self):\n i = 0\n while i < len(self.__lines):\n line = self.__lines[i]\n dt = re.match(r\"(\\d{4}-\\d{1,2}-\\d{1,2}\\s\\d{1,2}:\\d{1,2}:\\d{1,2})\", line)\n if not dt:\n i += 1\n continue\n log = {\n \"datetime\": dt.group()\n }\n line = line[dt.end()+1:].rstrip(\"\\n\")[::-1]\n qq_flag = line.find(\"(\")\n log[\"qq\"] = line[qq_flag-1:0:-1]\n log[\"name\"] = line[:qq_flag:-1].strip(\" \")\n i += 1\n log[\"content\"] = self.__lines[i].rstrip(\"\\n\")\n while self.__lines[i+1] != \"\\n\":\n i += 1\n log[\"content\"] += \" \" + self.__lines[i].rstrip(\"\\n\")\n self.__logs.append(log)\n i += 2", "def semver_from(changelog: Path) -> Version:\n with open(changelog) as f:\n matches = SEMVER_RE.finditer(f.read())\n versions: List[Version] = []\n is_unreleased = False\n for match in matches:\n version = match.groupdict()[\"version\"]\n if version.lower() == \"unreleased\":\n is_unreleased = True\n else:\n versions.append(Version.parse(version))\n\n versions.sort()\n latest = versions[-1]\n print(latest)\n return latest.bump_prerelease() if is_unreleased else latest", "def parse_changelog(filename):\n with open(filename, 'r') as changelog:\n for line in changelog.readlines():\n if re.match(r'^ .*<.*@.*> [A-Z][a-z][a-z], [0-9][0-9]', line):\n return re.split(r'^ .*<.*@.*>', line)[1].strip()", "def parse_tag(self, tag):\n \n mytag = \"latest\"\n mydigest = None\n\n regex = \"([\\w\\d\\.\\-]+)@?([\\w\\d\\.\\-]*)$\"\n\n regex_matched = re.match(regex, tag)\n mytag = regex_matched.group(1)\n mydigest = regex_matched.group(2)\n \n if regex_matched is None:\n mytag = \"latest\"\n\n return (mytag, mydigest)", "def parse(self, text):\n \n self.clear()\n lines = text.split(\"\\n\")\n self.logger.info(\"Parsing Git history\")\n \n for line in lines:\n if len(line) == 0:\n # Line is a spacer\n pass\n \n elif line[0] == ' ':\n # Line is part of a commit message\n pass\n \n else:\n # Line is part of a commit header\n spaceIdx = line.find(' ')\n if spaceIdx == -1:\n self.logger.warn(\"Skipping unrecognizable history line: \" + line)\n continue\n \n keyword = line[:spaceIdx]\n content = line[spaceIdx+1:]\n self.logger.debug(\"Found key-value pair: {0} {1}\".format(keyword, content))\n \n self._handleKeyValue(keyword, content)\n \n # Grab the last commit\n self._commits[self._currentCommit.hashKey] = self._currentCommit\n self._currentCommit = None\n \n # Finalize the commit tree\n self._resolveCommits()", "def parseLog(self, log):\n return 0", "def gettime(self, tag):\n cmd = ['git', 'log', '--pretty=format:\"%ct\"', \"-1\", tag]\n p = Popen(cmd, cwd=self.filename, stdout=PIPE)\n data, res = p.communicate()\n if data == b'':\n return [], []\n time_stamp = []\n this_tag = []\n for seconds in data.decode(\"utf-8\").split(\"\\n\"):\n month = round((int(seconds.strip('\"')) - ReleaseTime.base) / ReleaseTime.month_time)\n if month not in time_stamp:\n time_stamp.append(month)\n 
this_tag.append(tag[0:4])\n else:\n pass\n return time_stamp, this_tag", "def parse_svn_log_xml(xml_string):\r\n l = []\r\n tree = ET.fromstring(xml_string)\r\n for entry in tree.findall('logentry'):\r\n d = {}\r\n d['revision'] = int(entry.get('revision'))\r\n # Some revisions don't have authors, most notably\r\n # the first revision in a repository.\r\n author = entry.find('author')\r\n d['author'] = author is not None and author.text or None\r\n d['date'] = svn_date_to_timestamp(entry.find('date').text)\r\n # Some revisions may have empty commit message\r\n message = entry.find('msg')\r\n message = message is not None and message.text is not None \\\r\n and message.text.strip() or \"\"\r\n # Replace DOS return '\\r\\n' and MacOS return '\\r' with unix return '\\n'\r\n d['message'] = message.replace('\\r\\n', '\\n').replace('\\n\\r', '\\n'). \\\r\n replace('\\r', '\\n')\r\n paths = d['changed_paths'] = []\r\n for path in entry.findall('.//path'):\r\n copyfrom_rev = path.get('copyfrom-rev')\r\n if copyfrom_rev:\r\n copyfrom_rev = int(copyfrom_rev)\r\n paths.append({\r\n 'path': path.text,\r\n 'action': path.get('action'),\r\n 'copyfrom_path': path.get('copyfrom-path'),\r\n 'copyfrom_revision': copyfrom_rev,\r\n })\r\n l.append(d)\r\n return l", "def __parse(self):\n lines = self.file.readlines()\n name_idx = 2\n name_idx_found = False\n pathre = re.compile(r\"^[A-Z]:[\\\\/]\\w+\")\n for i in range(0, len(lines)):\n line = lines[i]\n if line.strip() != \"\": # check if line isn't empty\n if pathre.match(line):\n self.path = line.strip()\n continue\n tokens = line.split()\n time_str = tokens[0] + \" \" + tokens[1]\n try:\n time = datetime.strptime(time_str, \"%m/%d/%y %H:%M:%S\")\n except ValueError:\n raise LogParseError('Invalid log format. Date must be first \\\n token for each log event.') \n if not name_idx_found:\n name_idx = tokens.index('Monitoring')\n name_idx_found = True\n name = \"\"\n if tokens[name_idx].strip() == 'Monitoring':\n name = tokens[name_idx].lower() + \" \" + tokens[name_idx + 1].lower()\n duration = 0.0\n else:\n name = tokens[name_idx].lower()\n duration = tokens[name_idx + 1]\n self.events[name] = Event(time, name, duration)\n self.start = self.events['monitoring started']\n self.end = self.events['monitoring stopped']", "def get_latest_version():\n found_version = \"unknown\"\n version_re = r\"^## \\[(\\d+\\.\\d+\\.\\d+)\\]\"\n\n with open(os.path.join(__repo_root__, \"CHANGELOG.md\")) as changelog_file:\n for line in changelog_file:\n found = re.search(version_re, line)\n if found:\n found_version = found.group(1)\n break\n\n return found_version", "def process_git_tag(regex, inputtag):\n\ttry: \n\t\tgitre = re.compile(regex)\n\t\tmatch = gitre.search(inputtag)\n\t\tgroups = match.groupdict()\n\t\tversion = groups.get('version', '.unknown')\n\t\tdate = groups.get('date', '')\n\t\tgitmeta = groups.get('gitmeta', '')\n\t\tif date:\n\t\t\tversion = '.'.join([version, ''.join(date.split('-'))])\n\texcept (AttributeError, EnvironmentError, OSError):\n\t\tversion, gitmeta = '.unknown', ''\n\n\treturn version, gitmeta", "def _parse_commit_log(base_commit, tip_commit):\n\n class LogState(object):\n SEPARATOR_LINE = 0\n COMMIT_SHA1_LINE = 1\n MERGE_LINE = 2\n AUTHOR_LINE = 3\n COMMITTER_LINE = 4\n MIDDLE_SEPARATOR_LINE = 5\n TITLE_LINE = 6\n BLANK_LINE = 7\n BODY_LINES = 8\n\n commit_info = {}\n check_churn = True\n check_move = True\n\n git_log_cmd = shlex.split(\n 'git log --format=full --reverse {base_commit}..{tip_commit}'.format(\n base_commit=base_commit, 
tip_commit=tip_commit))\n git_log_output = subprocess.check_output(git_log_cmd)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n title = None\n separator = None\n body = []\n git_log_output_lines = git_log_output.splitlines()\n for idx, line in enumerate(git_log_output_lines, 1):\n # commit line\n if (\n log_line_state == LogState.SEPARATOR_LINE and\n line.startswith('commit ')):\n commit_sha1 = line.split(' ')[1]\n log_line_state = LogState.COMMIT_SHA1_LINE\n continue\n\n # Merge: line\n if (\n log_line_state == LogState.COMMIT_SHA1_LINE and\n line.startswith('Merge: ')):\n merge = line.split(' ', 1)[1]\n log_line_state = LogState.MERGE_LINE\n continue\n\n # Author: line\n if (\n log_line_state in [\n LogState.COMMIT_SHA1_LINE, LogState.MERGE_LINE] and\n line.startswith('Author: ')):\n author = line.split(' ', 1)[1]\n log_line_state = LogState.AUTHOR_LINE\n continue\n\n # Commit: line\n if log_line_state == LogState.AUTHOR_LINE and line.startswith('Commit: '):\n committer = line.split(' ', 1)[1]\n log_line_state = LogState.COMMITTER_LINE\n continue\n\n # empty line after Commit: line\n if log_line_state == LogState.COMMITTER_LINE and line == '':\n log_line_state = LogState.MIDDLE_SEPARATOR_LINE\n continue\n\n # Title line of commit message\n if (\n log_line_state == LogState.MIDDLE_SEPARATOR_LINE and\n line.startswith(' ')):\n title = line.lstrip(' ')\n log_line_state = LogState.TITLE_LINE\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Blank line between title and body (still contains 4 space prefix)\n if log_line_state == LogState.TITLE_LINE and line.startswith(' '):\n separator = line.lstrip(' ')\n log_line_state = LogState.BLANK_LINE\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = 
bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Body lines\n if (\n log_line_state in [LogState.BLANK_LINE, LogState.BODY_LINES] and\n line.startswith(' ')):\n body.append(line.lstrip(' '))\n log_line_state = LogState.BODY_LINES\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # End of commit message\n if (\n log_line_state in [\n LogState.TITLE_LINE, LogState.BLANK_LINE,\n LogState.BODY_LINES] and\n line == ''):\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n title = None\n separator = None\n body = []\n\n return commit_info", "def parseMergeChangeLogNodes( stdout ):\n result = []\n for l in stdout.split('\\n'):\n m = CHANGESET.match( l )\n if m:\n result.append( m.group(1) )\n return result", "def parseLog(self, log_lines):\n abstract", "def parse(file):\n logger.info('parsing DL7 dive log data')\n log = Log()\n content = file.readline()\n while not content == '':\n __parse_line(log, content)\n 
content = file.readline()\n return log", "def get_latest_rev(changesfile):\n if os.path.exists(changesfile):\n with open(changesfile) as chlog:\n line = chlog.readline()\n return line.strip().split(\" \")[-1].split(\"@\")[-1]\n return ''", "def get_last_tag_by_version(directory=None):\n cmd = \"git for-each-ref --sort='*authordate' \" \\\n \"--format='%(refname:short)' refs/tags/upstream\"\n output = check_output(cmd, shell=True, cwd=directory, stderr=PIPE)\n tags = []\n versions = []\n for line in output.splitlines():\n tags.append(line.strip())\n ver = re.match(r\"[0-9]+\\.[0-9]+\\.[0-9]+\", line)\n if ver:\n versions.append(ver)\n return tags[versions.index(max(versions))] if versions else ''", "def get_last_tag_by_date(directory=None):\n cmd = \"git for-each-ref --sort='*authordate' \" \\\n \"--format='%(refname:short)' refs/tags/upstream\"\n output = check_output(cmd, shell=True, cwd=directory, stderr=PIPE)\n output = output.splitlines()\n if len(output) == 0:\n return ''\n return output[-1]", "def call_change_log(input_filter):\n try:\n if input_filter is None:\n latest = _find_latest()\n service_endpoint = _find_filter(\"change_log\")\n else:\n keyword = input_filter.split(\" \")[0]\n if \"release\" == keyword or \"build\" == keyword:\n service_endpoint = _find_filter(input_filter.split(\";\")[2])\n else:\n service_endpoint = _find_filter(keyword)\n\n rel_build = input_filter.replace(\"_\", \".\").split(\" \")[1].split(\";\")\n\n if \"build\" == keyword:\n latest_rel = rel_build[1]\n latest_bui = rel_build[0]\n else:\n latest_rel = rel_build[0]\n latest_bui = rel_build[1]\n\n latest = {\"latest_val\": latest_rel + \"_\" + latest_bui,\n \"second_latest_val\": latest_rel + \"_\" + str(int(latest_bui)-1)}\n\n latest_query = latest[\"second_latest_val\"] + \"..\" + latest[\"latest_val\"]\n data = _call_rest_api(service_endpoint + \"/\" + latest_query, None)\n except Exception as e:\n logger.error(str(e))\n data = {\"success\": \"\", \"data\": {}, \"error\": {\"Message\": str(e)}}\n data = jsonify(data)\n return data", "def _load_changelog(self):\n\n changelog_json_file = self._project.get_changelog_path()\n if not os.path.isfile(changelog_json_file):\n logger.warning('Changelog File \"{}\" does not exists!'.format(changelog_json_file))\n return\n\n logger.warning('Loading Changelog from: \"{}\"'.format(changelog_json_file))\n\n with open(changelog_json_file, 'r') as f:\n if changelog_json_file.endswith('.json'):\n changelog_data = json.load(f, object_pairs_hook=OrderedDict)\n else:\n changelog_data = yaml.load(f, Loader=yamlordereddictloader.Loader)\n if not changelog_data:\n return\n\n changelog_versions = [key for key in changelog_data.keys()]\n ordered_versions = self._order_changelog_versions(changelog_versions)\n\n for version in reversed(ordered_versions):\n self._create_version(str(version), changelog_data[str(version)])\n\n last_version_item = self.version_accordion.item_at(0)\n last_version_item.set_collapsed(False)", "def getCommitsSinceLastRelease(self):\n f = open(self.last_released, 'r')\n old_rev = f.read().replace('\\n', '')\n f.close()\n new_rev = commands.getoutput('cd '+self.proj_dir+' && git log -1 --format=%H')\n cmd = 'cd '+self.proj_dir+' && git log --no-merges --pretty=format:\"%s\" '+old_rev+'..'+new_rev\n unreleased_commits = commands.getoutput(cmd) \n print 'Commits since last release:'\n print unreleased_commits\n unreleased_commits = unreleased_commits.split('\\n')\n self.commit_msgs = unreleased_commits\n self.new_rev = new_rev", "def 
get_newest_changefile_info(changefile_type):\n url = get_url(changefile_type) + \"/state.txt\"\n changefile_timestamp = None\n file_sequence_number = 0\n for result in urllib.urlopen(url):\n # get sequence number\n sequence_number_p = result.find(\"sequenceNumber=\")\n if sequence_number_p != -1:\n file_sequence_number = int(result[sequence_number_p + 15:])\n # get timestamp\n timestamp_p = result.find(\"timestamp=\")\n if timestamp_p != -1:\n # found timestamp line\n timestamp_p += 10 # jump over text\n result = result[timestamp_p:].replace(\"\\\\\", \"\").strip()\n changefile_timestamp = strtodatetime(result)\n\n if not changefile_timestamp:\n logging.info(\"(no timestamp)\")\n else:\n logging.info(\"newest %s timestamp: %s\" % \\\n (changefile_type, changefile_timestamp.isoformat()))\n return (changefile_timestamp, file_sequence_number)", "def latest_github_tag():\n release_tags_github_url = \"https://api.github.com/repos/rackerlabs/openstack-guest-agents-unix/tags\"\n release_tags_json = urllib2.urlopen(release_tags_github_url)\n release_tags_data = json.load(release_tags_json)\n return str(release_tags_data[0]['name'])[1:]", "def get_latest_tags(self):\n\n start = len(self.tags) - self.num_comparisons\n tags = self.tags\n latest = []\n for i in xrange(len(tags)):\n if i >= start:\n parts = tags[i]['ref'].split('/')\n release_num = parts[2]\n sha = tags[i]['object']['sha']\n tag = [release_num, sha]\n latest.append(tag)\n return latest", "def last_commit_short_log():\n subprocess.check_output('git log -1 --pretty=format:%h:%s'.split()).decode()", "def aggregate_git_log(path, progress_callback=lambda progress: None):\n versions = list()\n\n current_version, current_commits = None, list()\n\n log_data = git_log_hash(path)\n log_length = len(log_data)\n progress_step = max(1, log_length / 100)\n \n for idx, (rev_hash, date, msg) in enumerate(log_data):\n if idx % progress_step == 0:\n progress_callback(float(idx) / log_length)\n \n current_commits.append(msg)\n if git_checkout(path=path, revision_hash=rev_hash):\n version = get_package_metadata(path=path, field_name='Version')\n if version != current_version:\n # memorize it\n versions.insert(0,\n dict(version=version,\n date=datetime.strptime(date.rsplit(' ', 1)[0], '%Y-%m-%d %H:%M:%S'),\n sections=[dict(notes='',\n items=list(reversed(current_commits)))]))\n\n current_version, current_commits = version, list()\n\n if current_commits:\n versions.insert(0,\n dict(version='newest',\n date=None,\n sections=[dict(notes='',\n items=list(reversed(current_commits)))]))\n\n return versions", "def get_latest_build(tag, package):\n proc = Popen([\"osg-koji\", \"-q\", \"list-tagged\", \"--latest\", tag, package],\n stdout=PIPE)\n out = proc.communicate()[0] or b''\n ret = proc.returncode\n\n latest_build_line = out.decode(\"latin-1\").strip()\n\n if ret != 0 or not latest_build_line:\n return\n\n return latest_build_line.split()[0]", "def get_changelog(no):\n path = str(os.path.realpath(__file__ + '/../../CHANGELOG.md'))\n lines = [line.rstrip('\\n').strip() for line in open(path) if len(line.rstrip('\\n').strip()) > 0]\n changelog = []\n title = ''\n body = []\n for l in lines:\n if l.startswith('#'):\n if len(title) > 0:\n changelog.append({'title': title, 'body': body})\n body = []\n title = l.replace('### ', '')\n else:\n body.append(l.replace('- ', ''))\n\n return changelog[0:no]", "def parse_log_file(self):\n # Open log file\n log_file_data = utils.open_file(self.log_file)\n for line in log_file_data:\n algo = 
line.strip(\"\\n\").split(\":\")[1]\n if len(algo) > 3:\n hash_algo = algo.split(\"$\")[1]\n if hash_algo not in self.used_algo:\n self.used_algo.append(hash_algo)" ]
[ "0.616343", "0.6080337", "0.6000073", "0.59245783", "0.58972937", "0.5849398", "0.58468693", "0.57212126", "0.569988", "0.56685996", "0.5651651", "0.5609761", "0.5598691", "0.5501291", "0.549835", "0.54694337", "0.5457894", "0.5446662", "0.5412487", "0.53381616", "0.53150606", "0.53147554", "0.5304559", "0.52862513", "0.5274378", "0.52742696", "0.52488697", "0.52463675", "0.5225317", "0.5218753" ]
0.6108666
1
Constructs release notes for Bugzilla service deployment ticket.
def get_release_notes(self): notes = self.output.get_header('RELEASE NOTES') notes += 'https://{}/{}/{}/releases'.format(HOST_GITHUB, \ self.repo, self.product) + '\n' notes += self.output.get_sub_header('COMPARISONS') notes += self.get_comparison(self.latest_tags[0][VERS], self.latest_tags[1][VERS]) if len(self.latest_tags) >= (MAX_COMPARISONS_TO_SHOW - 1): notes += self.get_comparison(self.latest_tags[1][VERS], self.latest_tags[2][VERS]) if len(self.latest_tags) >= MAX_COMPARISONS_TO_SHOW: notes += self.get_comparison(self.latest_tags[2][VERS], self.latest_tags[3][VERS]) tag_data = self.get_tag(self.latest_tags[3][SHA]) notes += self.output.get_sub_header('TAGS') notes += self.get_url_tag_release(self.latest_tags[3][VERS]) + '\n' notes += self.get_url_tag_commit(tag_data["object"]["sha"]) + '\n' changelog = self.get_changelog(tag_data["object"]["sha"]) if changelog: notes += self.output.get_sub_header('CHANGELOG') notes += changelog return notes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def release_notes(version, author, git_ref_target, git_ref_source, build_type):\n print('generating release notes')\n if git_ref_source:\n if git_ref_source != 'HEAD':\n git_ref_source = 'origin/{}'.format(git_ref_source)\n changelog = run('git log origin/{}..{}'.format(git_ref_target,\n git_ref_source))\n else:\n git_ref_source = 'origin/master'\n changelog = run('git log {}..origin/{}'.format(git_ref_source, git_ref_target))\n notes = {\n 'version': version,\n 'author': author,\n 'build_type': build_type,\n 'date': datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\n 'changelog': changelog.stdout\n }\n return notes", "def main():\n parser = argparse.ArgumentParser(description='Creates tickets for release certification')\n parser.add_argument('-u', '--username', help='jira username', default='admin')\n parser.add_argument('-p', '--password', help='jira password', default='admin')\n parser.add_argument('-c', '--config', help='path to config file', default='./options.ini')\n parser.add_argument('-j', '--jira', help='url of jira server', default='http://localhost:8080')\n\n args = parser.parse_args()\n\n jira_user = args.username\n jira_pass = args.password\n jira_server = args.jira\n config_file_path = args.config\n CONFIG.read(config_file_path)\n\n parent_ticket = config_map('JiraOptions')['parent_ticket']\n apprenda_version = config_map('VersionInfo')['to_version']\n jira_project = config_map('JiraOptions')['project']\n jira_issue_type = config_map('JiraOptions')['issue_type']\n jira = JIRA(jira_server, basic_auth=(jira_user, jira_pass))\n\n parent_issue = jira.issue(parent_ticket)\n ticket_list = []\n\n # create clean install tickets\n clean_strings = config_map('CleanInstallSection')\n for cloud in ['single', 'hybrid']:\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(clean_strings['summary'], apprenda_version, cloud)\n ticket_to_add.format_description(clean_strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n # create upgrade tickets\n from_versions = json.loads(config_map('VersionInfo')['from_versions'])\n upgrade_strings = config_map('UpgradeSection')\n\n # single cloud\n for version in from_versions:\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(upgrade_strings['summary'], apprenda_version, version,\n \"single\")\n ticket_to_add.format_description(upgrade_strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n # hybrid cloud\n for version in from_versions:\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(upgrade_strings['summary'], apprenda_version, version,\n \"hybrid\")\n ticket_to_add.format_description(upgrade_strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n # create testing tickets for other tasks\n for section in CONFIG.sections():\n if 'Ticket' in section:\n strings = config_map(section)\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(strings['summary'], apprenda_version)\n ticket_to_add.format_description(strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n print 'Created {0} tickets, now sending them to Jira'.format(len(ticket_list))\n # send issues to jira and create tickets and links\n issues = jira.create_issues(field_list=ticket_list)\n\n for item in issues:\n jira.create_issue_link(\n type=\"Task of Story\",\n outwardIssue=item['issue'].key,\n inwardIssue=parent_issue.key,\n )\n\n print 'Finished 
linking issues, exiting.'", "def generate_release_notes(project_id, endstr = ' <br>', **config):\n\n gl = gitlab.Gitlab(**config)\n project = gl.projects.get(project_id)\n\n if not project.mergerequests.list(state='merged'):\n raise ValueError(f\"There is not merged merge request for project {project_id} {project.name}\")\n\n if not project.releases.list():\n log = f\"Changelog of {project.name}:{endstr}\"\n last_date = '0000-01-01T00:00:00Z'\n else:\n last_release = project.releases.list()[0]\n log = f\"Changelog since release {last_release.name} of {project.name}:{endstr}\"\n last_date = last_release.released_at\n\n page = 1\n list_mrs = project.mergerequests.list(state='merged',\n order_by='updated_at',\n updated_after=last_date,\n page=page)\n if not list_mrs:\n log += f\"There is no merged merge request after {last_date}\"\n return log\n\n while list_mrs:\n for mr in list_mrs:\n line = f\" * {mr.title} (@{mr.author['username']}){endstr}\"\n log += line\n\n page += 1\n list_mrs = project.mergerequests.list(state='merged',\n order_by='updated_at',\n updated_after=last_date,\n page=page\n )\n\n return log", "def generate_release_notes(repo, repo_path,\n start_revision, end_revision,\n show_dates, skip_requirement_merges,\n is_stable, series,\n email, email_from,\n email_reply_to, email_tags,\n include_pypi_link,\n changes_only,\n first_release,\n deliverable_file, description,\n publishing_dir_name,\n ):\n repo_name = repo.split('/')[-1]\n # Determine if this is a release candidate or not.\n is_release_candidate = 'rc' in end_revision\n\n # Do not mention the series in independent model since there is none\n if series == 'independent':\n series = ''\n\n if not email_from:\n raise RuntimeError('No email-from specified')\n\n # Get the commits that are in the desired range...\n git_range = \"%s..%s\" % (start_revision, end_revision)\n if show_dates:\n format = \"--format=%h %ci %s\"\n else:\n format = \"--oneline\"\n cmd = [\"git\", \"log\", \"--no-color\", format, \"--no-merges\", git_range]\n stdout = run_cmd(cmd, cwd=repo_path)\n changes = []\n for commit_line in stdout.splitlines():\n commit_line = commit_line.strip()\n if not commit_line or is_skippable_commit(skip_requirement_merges,\n commit_line):\n continue\n else:\n changes.append(commit_line)\n\n # Filter out any requirement file changes...\n requirement_changes = []\n requirement_files = list(glob.glob(os.path.join(repo_path,\n '*requirements*.txt')))\n if requirement_files:\n cmd = ['git', 'diff', '-U0', '--no-color', git_range]\n cmd.extend(requirement_files)\n stdout = run_cmd(cmd, cwd=repo_path)\n requirement_changes = [line.strip()\n for line in stdout.splitlines() if line.strip()]\n\n # Get statistics about the range given...\n cmd = ['git', 'diff', '--stat', '--no-color', git_range]\n stdout = run_cmd(cmd, cwd=repo_path)\n diff_stats = []\n for line in stdout.splitlines():\n line = line.strip()\n if not line or line.find(\"tests\") != -1 or line.startswith(\"doc\"):\n continue\n diff_stats.append(line)\n\n # Extract + valdiate needed sections...\n sections = parse_deliverable(\n series, repo_name, deliverable_file=deliverable_file)\n change_header = [\"Changes in %s %s\" % (repo, git_range)]\n change_header.append(\"-\" * len(change_header[0]))\n\n # Look for reno notes for this version.\n if not changes_only:\n logging.getLogger('reno').setLevel(logging.WARNING)\n cfg = reno_config.Config(\n reporoot=repo_path,\n )\n branch = None\n if is_stable and series:\n branch = 'origin/stable/%s' % series\n 
cfg.override(branch=branch)\n ldr = loader.Loader(conf=cfg, ignore_cache=True)\n if end_revision in ldr.versions:\n rst_notes = formatter.format_report(\n loader=ldr,\n config=cfg,\n versions_to_include=[end_revision],\n )\n reno_notes = rst2txt.convert(rst_notes).decode('utf-8')\n else:\n LOG.warning(\n ('Did not find revision %r in list of versions '\n 'with release notes %r, skipping reno'),\n end_revision, ldr.versions,\n )\n reno_notes = ''\n else:\n reno_notes = ''\n\n # The recipient for announcements should always be the\n # [email protected] ML (except for\n # release-test)\n email_to = '[email protected]'\n if repo_name == 'openstack-release-test':\n email_to = '[email protected]'\n\n params = dict(sections)\n params.update({\n 'project': repo,\n 'description': description,\n 'end_rev': end_revision,\n 'range': git_range,\n 'lib': repo_path,\n 'skip_requirement_merges': skip_requirement_merges,\n 'changes': changes,\n 'requirement_changes': requirement_changes,\n 'diff_stats': diff_stats,\n 'change_header': \"\\n\".join(change_header),\n 'emotion': random.choice(EMOTIONS),\n 'stable_series': is_stable,\n 'series': series,\n 'email': email,\n 'email_from': email_from,\n 'email_to': email_to,\n 'email_reply_to': email_reply_to,\n 'email_tags': email_tags,\n 'reno_notes': reno_notes,\n 'first_release': first_release,\n 'publishing_dir_name': publishing_dir_name,\n })\n if include_pypi_link:\n params['pypi_url'] = PYPI_URL_TPL % repo_name\n else:\n params['pypi_url'] = None\n\n response = []\n if changes_only:\n response.append(expand_template(CHANGES_ONLY_TPL, params))\n else:\n if email:\n email_header = expand_template(EMAIL_HEADER_TPL.strip(), params)\n response.append(email_header.lstrip())\n if is_release_candidate:\n response.append(expand_template(RELEASE_CANDIDATE_TPL, params))\n else:\n header = expand_template(HEADER_RELEASE_TPL.strip(), params)\n response.append(parawrap.fill(header))\n response.append(expand_template(CHANGE_RELEASE_TPL, params))\n return '\\n'.join(response)", "def make_release_notes(src, dst) -> str:\n result = _subprocess(['git', 'log', '--pretty=format:\"%s\"', f\"origin/{src}...origin/{dst}\"])\n commits = \"\\n\".join([f\"- {i[1:-1]}\" for i in result.split(\"\\n\")])\n\n if args.release_notes:\n with open(args.release_notes, 'w') as f:\n f.write(commits)\n\n return commits", "def get_release_note(comments):\n release_note = \"\"\n i = 0\n for comment in comments:\n #pprint.pprint(comment)\n #print \"**** Comment-{0}: {1}\".format(i, comment['body'])\n #print \"**** Comment-{index}: {body}\".format(\n # index=i,\n # body=comment['body']\n # )\n #print \"\\tURL: {0}\".format(comment['html_url'])\n #print \"\\tURL: {url}\".format(url=comment['html_url'])\n #comment['body'].index('Changed make')\n if comment['body'].lower().find('changed make') >= 0:\n #print \"Found 'Release Note'\"\n release_note = comment['body']\n #else:\n #print \"No 'Release Note' found\"\n\n i += 1\n # print \"----------------------------------------------------------\\n\"\n return release_note", "def create_release(release_files, changelog=\"\", output=\"\") -> str:\n release_notes = \"\"\n if 'TRAVIS_TAG' not in os.environ or not os.environ['TRAVIS_TAG']:\n print('No git tag: not deploying anything')\n return release_notes\n elif os.environ['TRAVIS_SECURE_ENV_VARS'] != 'true':\n print('No secure environment variables: not deploying anything')\n return release_notes\n elif len(release_files) == 0:\n print('No file to release')\n return release_notes\n else:\n print('Creating 
release from tag {}'.format(os.environ['TRAVIS_TAG']))\n\n headers = {\n 'User-Agent': 'Deploy-Script',\n 'Authorization': 'token {}'.format(os.environ['GH_TOKEN'])\n }\n\n changelog_content = ''\n if changelog:\n with open(changelog, 'r') as changelog_file:\n changelog_content = changelog_file.read()\n\n create_raw_data = {\n \"tag_name\": os.environ['TRAVIS_TAG'],\n \"body\": \"\\n\\n{}\".format(changelog_content)\n }\n\n # if a release exist with this tag_name delete it first\n # this allows to create the release from github website\n url = '/repos/{repo_slug}/releases/tags/{tag}'.format(\n repo_slug=os.environ['TRAVIS_REPO_SLUG'],\n tag=os.environ['TRAVIS_TAG'])\n conn = http.client.HTTPSConnection('api.github.com')\n conn.request('GET', url, headers=headers)\n response = conn.getresponse()\n release = json.loads(response.read().decode())\n\n if 'upload_url' not in release:\n print('Failed to create release!')\n print('Github API replied:')\n print('{} {}'.format(response.status, response.reason))\n print(repr(release))\n exit(-1)\n\n conn = http.client.HTTPSConnection('uploads.github.com')\n for release_file in release_files:\n _, filename = os.path.split(release_file)\n headers['Content-Type'] = 'application/zip'\n url = '{release_url}?name={filename}'.format(release_url=release['upload_url'][:-13], filename=filename)\n print('Upload to {}'.format(url))\n\n with open(release_file, 'rb') as f:\n data = f.read()\n conn.request('POST', url, data, headers)\n\n response = conn.getresponse()\n result = response.read()\n if response.status != 201:\n print('Failed to upload filename {filename}'.format(filename=filename))\n print('Github API replied:')\n print('{} {}'.format(response.status, response.reason))\n print(repr(json.loads(result.decode())))\n print('File:')\n print(' Size: {}'.format(os.path.getsize(release_file)))\n\n if output:\n with open(output, 'w') as f:\n print(\"Writing release notes\")\n print(release_notes)\n f.write(release_notes)", "def release_notes(self, release_notes):\n self._release_notes = release_notes", "def default_changelog(release_link_format: str, breaking_change_token: str = \"BREAKING\"):\n return Changelog(\n header=\"\"\"# Changelog\nAll notable changes to this project will be documented in this file.\n\nThe format is based on [Keep a Changelog] and this project adheres to\n[Semantic Versioning].\n\nTypes of changes are:\n* **Security** in case of vulnerabilities.\n* **Deprecated** for soon-to-be removed features.\n* **Added** for new features.\n* **Changed** for changes in existing functionality.\n* **Removed** for now removed features.\n* **Fixed** for any bug fixes.\"\"\",\n config=ChangelogConfig(\n release_link_format=release_link_format,\n breaking_change_token=breaking_change_token,\n ),\n releases=OrderedDict(\n {\n ReleaseTag(\"Unreleased\"): ReleaseSection(entries={}, timestamp=None),\n }\n ),\n links=OrderedDict(\n {\n \"Unreleased\": release_link_format.format(previous_tag=\"initial\", tag=\"HEAD\"),\n \"Keep a Changelog\": \"http://keepachangelog.com/en/1.0.0/\",\n \"Semantic Versioning\": \"http://semver.org/spec/v2.0.0.html\",\n },\n ),\n )", "def create_release_notes(yaml_file, realease_notes_file, application_name):\n try:\n with open(yaml_file) as input_file: # read yaml file AND CONVERT IT INTO DICTIONARY\n release_dict=yaml.load(input_file, Loader=yaml.FullLoader)\n logging.info(\"FILE CONVERTED TO DICTIONARY SUCCESSFULLY\")\n \n \n except (FileNotFoundError,FileExistsError) as error: #file doesn't exist\n logging.warning(\"yaml file 
is not exist or damaged\")\n return None\n \n except yaml.scanner.ScannerError as error: # yaml file syntax error\n logging.warning(\"wrong yaml format\")\n return None\n \n\n with open(realease_notes_file,\"w\") as output_file :# create release note and write on it\n for key,value in release_dict.items():\n output_file.write(f\"{key}: \\n\")\n if type(value) == dict:\n for key2,value2 in value.items():\n output_file.write(f\" {key2}: {value2} \\n\")\n else:\n for value2 in value:\n output_file.write(f\" {value2} \\n\")\n output_file.write(\"\\n\")\n logging.info(\"RELEASE NOTES FILE CREATED SUCCESSFULLY\") \n return release_dict", "def create_release(config, args):\n yield config.repo.create_release(args.tag_name, name=args.name,\n target_commitish=args.get(\"target_commitish\"), body=args.get(\"body\"),\n draft=args.get_bool(\"draft\"), prerelease=args.get_bool(\"prerelease\"))", "def set_note_version_server(cls):\n #Change current working directory to root sdk directory\n Utility.pushd(Settings.rootSdkPath)\n cls.init()\n notes_file = 'releases.txt'\n #Get the list of WebRtc nuget pakcages with prereleases\n packages = NugetUtility.nuget_cli('list', 'Id:WebRtc', '-PreRelease')\n packages = packages.split('\\r\\n')\n webrtcRegex = r\"^WebRtc+\\s\"\n #Search the list of the packages for a WebRtc package and set the version\n for package in packages:\n if re.match(webrtcRegex, package, flags=0):\n version = package\n\n note = cls.get_note(notes_file)\n if note is not False:\n new_note = '---------------------------------------------------------------------\\n' + \\\n 'Version: ' + version + '\\n' + \\\n '---------------------------------------------------------------------\\n'\n if os.path.isfile(notes_file):\n with open(notes_file,\"r\") as src:\n all_notes=src.readlines()\n if '--------------------------------------------' not in all_notes[0]:\n all_notes.insert(0,new_note)\n else:\n all_notes = new_note\n\n with open(notes_file, 'w') as release_notes:\n release_notes.writelines(all_notes)\n cls.logger.info(\"Release notes vesion set: \" + version) \n \n # return to the base directory\n Utility.popd()", "def postreleaser_before(data):\n\n data['dev_version_template'] = '%(new_version)s.dev'", "def _append_descriptions(self, issue, dep_name, dep_latest_version):\n logging.info(\"Updating JIRA issue {0} to track {1} upgrade process\".format(\n issue.key,\n dep_name))\n description = issue.fields.description + \"\"\"\\n\\n{0}\\n\n Please review and upgrade the {1} to the latest version {2} \\n \n cc: \"\"\".format(\n datetime.today(),\n dep_name,\n dep_latest_version\n )\n _, owners = self._find_owners(dep_name)\n for owner in owners:\n description += \"[~{0}], \".format(owner)\n try:\n self.jira.update_issue(issue, description=description)\n except Exception as e:\n traceback.print_exc()\n logging.error(\"Failed updating issue: \"+ str(e))", "def get_changelog(no):\n path = str(os.path.realpath(__file__ + '/../../CHANGELOG.md'))\n lines = [line.rstrip('\\n').strip() for line in open(path) if len(line.rstrip('\\n').strip()) > 0]\n changelog = []\n title = ''\n body = []\n for l in lines:\n if l.startswith('#'):\n if len(title) > 0:\n changelog.append({'title': title, 'body': body})\n body = []\n title = l.replace('### ', '')\n else:\n body.append(l.replace('- ', ''))\n\n return changelog[0:no]", "def _get_pkg_changelog_contents(ctx: Context, version: str):\n changes = _get_changelog_contents(ctx, version)\n changes = \"\\n\".join(changes.split(\"\\n\")[2:])\n changes = changes.replace(\n 
textwrap.dedent(\n \"\"\"\n Removed\n -------\n\n \"\"\"\n ),\n \"\",\n )\n changes = changes.replace(\n textwrap.dedent(\n \"\"\"\n Deprecated\n ----------\n\n \"\"\"\n ),\n \"\",\n )\n changes = changes.replace(\n textwrap.dedent(\n \"\"\"\n Changed\n -------\n\n \"\"\"\n ),\n \"\",\n )\n changes = changes.replace(\n textwrap.dedent(\n \"\"\"\n Fixed\n -----\n\n \"\"\"\n ),\n \"\",\n )\n changes = changes.replace(\n textwrap.dedent(\n \"\"\"\n Added\n -----\n\n \"\"\"\n ),\n \"\",\n )\n return changes", "def generateReleaseRunBB(self, job):\n pass", "def make_release():\n parser = OptionParser()\n parser.add_option(\"-d\", \"--destination\", action=\"store\", type=\"string\", \n dest=\"destdir\",\n help=\"directory where distributions and docs will be placed\")\n parser.add_option(\"-v\", \"--version\", action=\"store\", type=\"string\", \n dest=\"version\",\n help=\"version string applied to all openmdao distributions\")\n parser.add_option(\"-m\", action=\"store\", type=\"string\", dest=\"comment\",\n help=\"optional comment for version tag\")\n parser.add_option(\"-b\", \"--basebranch\", action=\"store\", type=\"string\", \n dest=\"base\", default='master', \n help=\"base branch for release. defaults to master\")\n parser.add_option(\"-t\", \"--test\", action=\"store_true\", dest=\"test\",\n help=\"used for testing. A release branch will not be created\")\n parser.add_option(\"-n\", \"--nodocbuild\", action=\"store_true\", \n dest=\"nodocbuild\",\n help=\"used for testing. The docs will not be rebuilt if they already exist\")\n parser.add_option(\"--host\", action='append', dest='hosts', metavar='HOST',\n default=[],\n help=\"host from config file to build bdist_eggs on. \"\n \"Multiple --host args are allowed.\")\n parser.add_option(\"-c\", \"--config\", action='store', dest='cfg', \n metavar='CONFIG', default='~/.openmdao/testhosts.cfg',\n help=\"path of config file where info for hosts is located\")\n (options, args) = parser.parse_args(sys.argv[1:])\n \n if not options.version or not options.destdir:\n parser.print_help()\n sys.exit(-1)\n \n _check_version(options.version)\n\n options.cfg = os.path.expanduser(options.cfg)\n \n config = ConfigParser.ConfigParser()\n config.readfp(open(options.cfg))\n \n haswin = False\n for host in options.hosts:\n if host == 'localhost':\n if sys.platform.startswith('win'):\n haswin = True\n elif config.has_section(host):\n platform = config.get(host, 'platform')\n if platform == 'windows':\n haswin = True\n if not haswin:\n print \"no windows host was specified, so can't build binary eggs for windows\"\n sys.exit(-1)\n \n orig_branch = get_git_branch()\n if not orig_branch:\n print \"You must run mkrelease from within a git repository. aborting\"\n sys.exit(-1)\n\n if not options.test:\n if orig_branch != options.base:\n print \"Your current branch '%s', is not the specified base branch '%s'\" % (orig_branch, options.base)\n sys.exit(-1)\n \n if _has_checkouts():\n print \"There are uncommitted changes. You must run mkrelease.py from a clean branch\"\n sys.exit(-1)\n \n if orig_branch == 'master':\n print \"pulling master\"\n os.system(\"git pull origin master\")\n if _has_checkouts():\n print \"something went wrong during pull. aborting\"\n sys.exit(-1)\n else:\n print \"WARNING: base branch is not 'master' so it has not been\"\n print \"automatically brought up-to-date.\"\n answer = raw_input(\"Proceed? 
(Y/N) \")\n if answer.lower() not in [\"y\", \"yes\"]:\n sys.exit(-1)\n \n relbranch = \"release_%s\" % options.version\n if relbranch in get_git_branches():\n print \"release branch %s already exists in this repo\" % relbranch\n sys.exit(-1)\n\n print \"creating release branch '%s' from base branch '%s'\" % (relbranch, orig_branch)\n check_call(['git', 'branch', relbranch])\n print \"checking out branch '%s'\" % relbranch\n check_call(['git', 'checkout', relbranch])\n \n destdir = os.path.abspath(options.destdir)\n if not os.path.exists(destdir):\n os.makedirs(destdir)\n\n startdir = os.getcwd()\n topdir = repo_top()\n \n cfgpath = os.path.expanduser(options.cfg)\n \n try:\n _update_releaseinfo_files(options.version)\n \n # build the docs\n docdir = os.path.join(topdir, 'docs')\n idxpath = os.path.join(docdir, '_build', 'html', 'index.html')\n \n if not os.path.isfile(idxpath) or not options.nodocbuild:\n build_docs(argv=['-v', options.version])\n shutil.copytree(os.path.join(topdir,'docs','_build', 'html'), \n os.path.join(destdir,'docs'))\n\n if not options.test:\n # commit the changes to the release branch\n print \"committing all changes to branch '%s'\" % relbranch\n check_call(['git', 'commit', '-a', '-m', \n '\"updating releaseinfo files for release %s\"' % \n options.version])\n\n # build openmdao package distributions\n proj_dirs = []\n for project_name, pdir, pkgtype in openmdao_packages:\n pdir = os.path.join(topdir, pdir, project_name)\n if 'src' in os.listdir(pdir):\n os.chdir(os.path.join(pdir, 'src'))\n else:\n os.chdir(pdir)\n print 'building %s' % project_name\n _build_sdist(pdir, destdir, options.version)\n if pkgtype == 'bdist_egg':\n proj_dirs.append(pdir)\n \n os.chdir(startdir)\n _build_bdist_eggs(proj_dirs, destdir, options.hosts, cfgpath)\n \n print 'creating bootstrapping installer script go-openmdao.py'\n installer = os.path.join(os.path.dirname(__file__),\n 'mkinstaller.py')\n \n check_call([sys.executable, installer, '--dest=%s'%destdir])\n\n if options.comment:\n comment = options.comment\n else:\n comment = 'creating release %s' % options.version\n \n if options.test:\n _rollback_releaseinfo_files()\n else:\n # tag the current revision with the release version id\n print \"tagging release with '%s'\" % options.version\n check_call(['git', 'tag', '-f', '-a', options.version, '-m', comment])\n \n check_call(['git', 'checkout', orig_branch])\n print \"\\n*REMEMBER* to push '%s' up to the master branch if this release is official\" % relbranch\n \n print \"new release files have been placed in %s\" % destdir\n \n finally:\n os.chdir(startdir)", "def set_note_version(cls, version):\n #Change current working directory to root sdk directory\n Utility.pushd(Settings.rootSdkPath)\n cls.init()\n notes_file = 'releases.txt'\n note = cls.get_note(notes_file)\n if note is not False:\n new_note = '---------------------------------------------------------------------\\n' + \\\n 'Version: ' + version + '\\n' + \\\n '---------------------------------------------------------------------\\n'\n if os.path.isfile(notes_file):\n with open(notes_file,\"r\") as src:\n all_notes=src.readlines()\n if '--------------------------------------------' not in all_notes[0]:\n all_notes.insert(0,new_note)\n else:\n all_notes = new_note\n\n with open(notes_file, 'w') as release_notes:\n release_notes.writelines(all_notes)\n cls.logger.info(\"Release notes vesion set: \" + version)\n # return to the base directory\n Utility.popd()", "def printNotes():\n print(\"Generating 
testsuiteNodes.js...\", end=\"\")\n\n suite = reftest.reftestSuite()\n fp = file(MATHJAX_WEB_PATH + \"testsuiteNotes.html\", \"w\")\n stdout = sys.stdout\n sys.stdout = fp\n print('<!doctype>')\n print('<!-- ' + WARNING_GENERATED_FILE + '-->')\n print('<html>')\n print('<head>')\n print(' <meta http-equiv=\"Content-type\" content=\"text/html;charset=UTF-8\">')\n print(' <title>Testsuite Notes</title>')\n print(' <link rel=\"stylesheet\" type=\"text/css\" href=\"default.css\"/>')\n print('</head>')\n print('<body>')\n print('<div class=\"related\">')\n print(' <h3>Navigation</h3>')\n print(' <ul>')\n print(' <li><a href=\"./\">Back to home</a></li> ')\n print(' </ul>')\n print('</div>')\n\n print('<div class=\"body testsuiteNotes\">')\n print(' <h1>Testsuite Notes</h1>')\n\n suite.addReftests(\"printNotes\",\n MATHJAX_TESTSUITE_PATH, \"reftest.list\", -1)\n print('</div>')\n print('</body>')\n print('</html>')\n sys.stdout = stdout\n fp.close()\n\n print(\"done\")", "def _get_changelog_contents(ctx: Context, version: str):\n return ctx.run(\n \"towncrier\",\n \"build\",\n \"--draft\",\n f\"--version={version}\",\n capture=True,\n ).stdout.decode()", "def to_XML(self, targets):\n self.bug = et.Element(\"bug\")\n\n self.__add_subelement(\"creation_time\", \"created\",\n self.__format_time)\n self.__add_subelement(\"title\", \"summary\")\n self.__add_subelement(\"status\", \"status\",\n self.__convert_status)\n self.__add_subelement(\"reporter\", \"reporter\")\n self.__add_subelement(\"reporter\", \"creator\")\n # FIXME\n #self.__add_subelement(\"assignee\", \"assigned\")\n\n # BE will create UUIDs automatically if they are not present\n # in the XML (or if the -p flag is not specified). However,\n # we need the UUIDs to record relationships between bugs\n # and BE targets (corresponding to Ditz releases), so we \n # create our own here.\n bug_uuid = str(uuid.uuid4())\n et.SubElement(self.bug, \"uuid\").text = bug_uuid\n\n if self.desc is not None:\n self.bug.append(make_comment(self.desc,\n self.reporter,\n self.__format_time(\n self.creation_time)))\n\n if self.release is not None:\n if self.release not in targets:\n # There should already be an entry for the target\n # taken from the Ditz project.yaml file, but in case\n # this is missing for any reason we create it here\n # and assume a status of \"open\".\n target_uuid = str(uuid.uuid4())\n targets[self.release] = (target_uuid, \"open\", [])\n et.SubElement(self.bug, \"extra-string\").text = \\\n \"BLOCKS:\" + targets[self.release][0]\n targets[self.release][2].append(bug_uuid)\n\n for date, reporter, action, comment in self.log_events:\n if comment is not None and comment != \"\":\n self.bug.append(make_comment(comment,\n reporter,\n self.__format_time(\n date)))\n\n\n #for comment in get_comments(cnf['git_user'], cnf['git_password'],\n # cnf['repo'], iss[u\"number\"]):\n # self.bug.append(make_comment(comment[u\"body\"],\n # comment[u\"user\"][u\"login\"],\n # format_time(comment[u\"updated_at\"])))\n\n return self.bug", "def deploy(version):\n toolkit.readmegen(version)", "def create_changelog (component):\n vprint (\"Creating ChangeLog entry for \" + component)\n\n old_tag = get_tag (old_comp_versions, 'ACE')\n\n # Generate changelogs per component\n path = get_path(component, \"ChangeLogs\", component + \"-\" + comp_versions[component + \"_version_\"])\n ex (\"cd $DOC_ROOT/ACE_TAO && git log \" + old_tag + \"..HEAD \" + component + \" > \" + path)\n\n return [path]", "def _get_releaseinfo_str(version):\n opts = {}\n f = 
StringIO.StringIO()\n opts['version'] = version\n opts['date'] = get_git_log_info(\"%ci\")\n opts['comments'] = get_git_log_info(\"%b%+s%+N\")\n opts['commit'] = get_git_log_info(\"%H\")\n f.write(relfile_template % opts)\n return f.getvalue()", "def test_create_release(self):\n releases_before = self.hello_world_project.get_releases()\n latest_release = releases_before[0].tag_name\n count_before = len(releases_before)\n increased_release = \".\".join(\n [\n latest_release.rsplit(\".\", 1)[0],\n str(int(latest_release.rsplit(\".\", 1)[1]) + 1),\n ]\n )\n release = self.hello_world_project.create_release(\n tag=increased_release, name=\"test\", message=\"testing release\"\n )\n count_after = len(self.hello_world_project.get_releases())\n assert release.tag_name == increased_release\n assert release.title == \"test\"\n assert release.body == \"testing release\"\n assert count_before + 1 == count_after", "def test_preserveTicketHint(self):\n news = self.project.child('NEWS')\n news.setContent(\n 'Ticket numbers in this file can be looked up by visiting\\n'\n 'http://twistedmatrix.com/trac/ticket/<number>\\n'\n '\\n'\n 'Blah blah other stuff.\\n')\n\n self.builder.build(self.project, news, \"Super Awesometastic 32.16\")\n\n self.assertEquals(\n news.getContent(),\n 'Ticket numbers in this file can be looked up by visiting\\n'\n 'http://twistedmatrix.com/trac/ticket/<number>\\n'\n '\\n'\n 'Super Awesometastic 32.16\\n'\n '=========================\\n'\n '\\n'\n 'Features\\n'\n '--------\\n'\n ' - We now support the web. (#5)\\n'\n ' - The widget is more robust. (#12)\\n'\n ' - A very long feature which takes many words to describe with any\\n'\n ' accuracy was introduced so that the line wrapping behavior of the\\n'\n ' news generating code could be verified. (#15)\\n'\n ' - A simpler feature described on multiple lines was added. (#16)\\n'\n '\\n'\n 'Bugfixes\\n'\n '--------\\n'\n ' - Broken stuff was fixed. (#23)\\n'\n '\\n'\n 'Improved Documentation\\n'\n '----------------------\\n'\n ' - foo.bar.Baz.quux (#40)\\n'\n ' - writing Foo servers (#41)\\n'\n '\\n'\n 'Deprecations and Removals\\n'\n '-------------------------\\n'\n ' - Stupid stuff was deprecated. (#25)\\n'\n '\\n'\n 'Other\\n'\n '-----\\n'\n ' - #30, #35\\n'\n '\\n\\n'\n 'Blah blah other stuff.\\n')", "def make_release(self, **kwargs) -> CrossrefEventsRelease:\n\n start_date, end_date, first_release = self.get_release_info(**kwargs)\n\n release = CrossrefEventsRelease(\n self.dag_id, start_date, end_date, first_release, self.mailto, self.max_threads, self.max_processes\n )\n return release", "def gen_build_str_dec():\n\t#Get name of person building firmware\n\t#git config --get-all user.name\n\t#Get repo revision\n\t#git log | head -1 | cut -d \" \" -f 2\n\t#Get branch\n\t#git branch | grep \"\\*\" | cut -d \" \" -f 2\n\t#Get modified status\n\t#Date, time, gcc version (__VERSION__)\n\ts = \"Miniboard Firmware rev \"\n\treturn \"\"", "def create_release(ctx):\n # Get the head of master\n r = _get_repo()\n b = r.get_branch(branch=\"master\")\n head = b.commit\n\n faasm_ver = get_faasm_version()\n\n # Create a tag from the head\n tag_name = _tag_name(faasm_ver)\n r.create_git_tag(\n tag_name,\n \"Release {}\\n\".format(faasm_ver),\n head.sha,\n \"commit\",\n )\n\n r.create_git_release(\n tag_name,\n \"Faasm {}\".format(faasm_ver),\n \"Release {}\\n\".format(faasm_ver),\n draft=True\n )" ]
[ "0.7133973", "0.62649", "0.6180011", "0.61652756", "0.57979625", "0.5698693", "0.56655735", "0.56425494", "0.55498487", "0.55108356", "0.5495176", "0.5493605", "0.5473608", "0.54344285", "0.5391342", "0.5365955", "0.5322172", "0.5317257", "0.5311151", "0.52918744", "0.52649474", "0.52396494", "0.5229949", "0.5215989", "0.5195518", "0.51755357", "0.51697934", "0.5121394", "0.5102438", "0.50863516" ]
0.6374402
1
Gets the confidence of this PcrTestRecordResult.
def confidence(self): return self._confidence
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def confidence(self) -> float:\n return self._confidence", "def confidence(self) -> float:\n return float(self.class_scores[self.class_num])", "def detection_confidence(self):\n return self._detection_confidence", "def get_medie_confidence(self):\n return self.__medie_confidence", "def get_min_confidence(self):\n return self.__min_confidence", "def confidence_at_tpr(self, tpr):\r\n\r\n assert self.validation_confidences is not None\r\n assert tpr > 0\r\n\r\n # true positives are correctly classified examples\r\n if self.sorted_correct_validation_confidences is None:\r\n correct_validation_confidences = self.validation_confidences[numpy.logical_not(self.validation_errors)]\r\n self.sorted_correct_validation_confidences = numpy.sort(numpy.copy(correct_validation_confidences))\r\n # rounding is a hack see tests\r\n cutoff = math.floor(self.sorted_correct_validation_confidences.shape[0] * round((1 - tpr), 2))\r\n assert cutoff >= 0\r\n assert cutoff < self.sorted_correct_validation_confidences.shape[0]\r\n return self.sorted_correct_validation_confidences[cutoff]", "def generate_confidence(self):\n conf_score = np.random.normal(self.speech_conf_mean,\n self.speech_conf_std)\n conf_score = round(conf_score, 2)\n conf_score = max(conf_score, 0.0) # >= 0.\n conf_score = min(conf_score, 1.0) # <= 1.\n return conf_score", "def min_confidence(self) -> float:\n return self._min_confidence", "def confidence_at_99tpr(self):\r\n\r\n return self.confidence_at_tpr(0.99)", "def confidence_at_995tpr(self):\r\n\r\n return self.confidence_at_tpr(0.995)", "def confidence(self):\n\n choices = self.choices\n\n # Get the chi-squared between the top two choices, if more than two choices exist\n if len(choices) >= 2:\n csq = chi_squared(*choices)\n confident = is_confident(csq, len(choices)) if len(choices) <= 10 else None\n else:\n csq = None\n confident = False\n\n return (csq, confident)", "def confidence_at_98tpr(self):\r\n\r\n return self.confidence_at_tpr(0.98)", "def landmarking_confidence(self):\n return self._landmarking_confidence", "def ci(self):\n var_assumptions = self.var_assumptions if self.var_assumptions == \"pooled\" else \"unequal\"\n ci_vals = self.comparison.zconfint_diff(self.alpha, self.hypothesis_sm, var_assumptions)\n\n return [ci_vals, self.ci_percents]", "def confidence_at_95tpr(self):\r\n\r\n return self.confidence_at_tpr(0.95)", "def confidence(self, filename):\n f = open(filename, 'rb')\n content = list(f.read())\n f.close()\n\n file_entropy = self.entropy(content)\n\n return (round(file_entropy / 8 * 100), filename)", "def __determina_media_confidence(self):\n media = 0\n nr = 0\n for el in self.__results['conf']:\n media += int(el)\n nr += 1\n media /= nr\n return media", "def confidence(self, value):\n if not self.can_update():\n self._handle_error(910, [self.type])\n request_data = {'confidence': value}\n return self.tc_requests.update(\n self.api_type, self.api_branch, self.unique_id, request_data, owner=self.owner\n )", "def is_successful(self):\n try:\n if self.is_skipped:\n return TestCase.EX_TESTCASE_SKIPPED\n assert self.criteria\n assert self.result is not None\n if (not isinstance(self.result, str) and\n not isinstance(self.criteria, str)):\n if self.result >= self.criteria:\n return TestCase.EX_OK\n else:\n # Backward compatibility\n # It must be removed as soon as TestCase subclasses\n # stop setting result = 'PASS' or 'FAIL'.\n # In this case criteria is unread.\n self.__logger.warning(\n \"Please update result which must be an int!\")\n if self.result == 'PASS':\n 
return TestCase.EX_OK\n except AssertionError:\n self.__logger.error(\"Please run test before checking the results\")\n return TestCase.EX_TESTCASE_FAILED", "def tpr_at_confidence(self, threshold):\r\n\r\n return numpy.sum(self.test_confidences[numpy.logical_not(self.test_errors)] >= threshold) / float(numpy.sum(numpy.logical_not(self.test_errors)))", "def fpr_at_confidence(self, threshold):\r\n\r\n return numpy.sum(self.test_confidences[self.test_errors] >= threshold) / float(numpy.sum(self.test_errors))", "def score(self) -> FAIRResultCommonScore:\n return self._score", "def result(self):\n prec_value = self.precision.result()\n recall_value = self.recall.result()\n return 2 * math_ops.div_no_nan(prec_value * recall_value,\n prec_value + recall_value)", "def confidence(self, confidence):\n self._confidence = confidence", "def confidence(self, confidence):\n self._confidence = confidence", "def confidence(self, confidence: float):\n\n self._confidence = confidence", "def confidence_values(self) -> List[Union[int, str]]:\n\n return self._confidence_values", "def confidence(s, p):\r\n p = Basic.sympify(p)\r\n assert p <= 1\r\n\r\n d = (s.b-s.a)*p / 2\r\n return (s.mean - d, s.mean + d)", "def calc_confidence_level(self, z_value):\n\n confidence_level = 0.5 * (1 + math.erf(z_value/2**0.5))\n\n return confidence_level", "def coverage(self):\n try:\n return self.found * 100 / self.needed\n except ZeroDivisionError:\n return 100.0" ]
[ "0.74183095", "0.695079", "0.6752761", "0.62172127", "0.6101365", "0.59268916", "0.59007055", "0.58964837", "0.58102906", "0.5795349", "0.5786901", "0.57829434", "0.5746195", "0.56174123", "0.56115687", "0.5601758", "0.55095553", "0.55019873", "0.545608", "0.5437329", "0.5400275", "0.5359699", "0.53264403", "0.5325444", "0.5325444", "0.5318751", "0.5312392", "0.5309795", "0.53082025", "0.5288559" ]
0.7580267
1
Prompt user for input and continue to do so until input is valid. This function takes two required inputs, the message to display, and the limit of characters required. If the user enters something too long, they are prompted again until the input is correct. If the optional isNumber parameter is True, then it will also continue to prompt the user until a valid number is input.
def LimitedInput(message, limit, isNumber=False): keepAsking = True while keepAsking: answer = input(message) if len(answer) > limit: print("The input must be", limit, "characters or less.") else: keepAsking = False if isNumber is True and CheckNumber(answer) is False: print("The input must be a number.") keepAsking = True return answer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ask_number(message: str) -> int:\n global number\n assert isinstance(message, str), \"message should be a string\"\n stop_condition2 = False\n while not stop_condition2:\n try:\n number = int(input(message))\n if number < lower_range:\n print(\"Please pick a number within the range\", lower_range, \"and\", upper_range, \".\")\n elif number > upper_range:\n print(\"Please pick a number between\", lower_range, \"and\", upper_range, \".\")\n else:\n stop_condition2: bool = True\n except ValueError as ve:\n print(\"This is not a number.\")\n return number", "def ask_user():\r\n password_lenght = 0\r\n while password_lenght == 0:\r\n try:\r\n password_lenght = int(input(\"How long password you want? Enter the number... \"))\r\n if password_lenght <= 0:\r\n print(\"Try to enter any number greater than 0...\")\r\n continue\r\n return password_lenght\r\n except Exception:\r\n continue", "def get_number_input(msg=\"Provide a number: \", num_type=int):\n while True:\n try:\n num = num_type(input(msg))\n except ValueError:\n print(f\"Whoops!! Please enter a correct number of {num_type}!!\")\n continue\n else:\n print(\"Number accepted!!\")\n return num", "def getSecretMessage(limit):\n\n\tsecret = None\n\twhile secret == None or len(secret) not in range(1, limit+1):\n\t\tsecret = raw_input(\"Enter the secret message (Max length %d): \" % limit)\n\t\tif len(secret) > limit:\n\t\t\tprint \"Invalid message: too long!\"\n\t\telif len(secret) < 1:\n\t\t\tprint \"Invalid message: empty input!\"\n\n\treturn secret", "def pedir_entero(msg, min, max):\n while True:\n n = str(raw_input(msg))\n if not n.isdigit() :\n show_msg(\"Oops! Parece que eso no era un numero entero\")\n continue\n n = int(n)\n if n <= max and n >= min :\n return n\n else:\n show_msg(\"Numero fuera de rango\")\n continue", "def prompt_with_limits(prompt, default=None, low_limit=None, high_limit=None):\n msg = \"%s [%s]: \" % (prompt, default) if default is not None else \"%s: \" % prompt\n value = None\n while value is None:\n value = raw_input(msg).strip()\n if value:\n try:\n v = float(value)\n if (low_limit is not None and v < low_limit) or \\\n (high_limit is not None and v > high_limit):\n value = None\n except (ValueError, TypeError):\n value = None\n elif default is not None:\n value = default\n\n return value", "def PickNumber(lenList, message = ' To select the correct option pick a number in range ',min = 1, typeInput = int):\n while True:\n try:\n input1 = typeInput(input('\\n'+message+str(min)+'-'+str(lenList)+': \\t'))\n except ValueError:\n print( 'That\\'s not a number!')\n else:\n if min <= input1 <= lenList:\n return input1\n else:\n print( 'Number out of range. 
Try again!')", "def get_user_text() -> str:\n validinput = False\n while not validinput:\n intext = input(\"Which of your most favorite quotes can Polly cook up for you?\")\n if len(intext) > POLLY_CHAR_LIMIT:\n print(\"You have entered in more text that Polly can support in one call.\")\n validinput = False\n else:\n validinput = True\n return intext", "def enterInteger(CustomMessage=\"Please enter an integer: \",\r\n CustomErrorMessage=\"The input is not an integer, please try again...\",\r\n min=None, max=None):\r\n \r\n isInteger = False\r\n while not isInteger:\r\n try:\r\n number = int(input(CustomMessage))\r\n isInteger = True\r\n except ValueError:\r\n print(CustomErrorMessage)\r\n\r\n # range parameter\r\n if type(min) is int and type(max) is int:\r\n if min > max:\r\n raise ValueError(\"parameter 'min' is larger than 'max'\")\r\n else:\r\n while min > number or number > max:\r\n number = enterInteger(CustomMessage=\"Please input a number within \"+str(min)+\" to \"+str(max)+\": \")\r\n elif type(min) is int:\r\n while min > number:\r\n number = enterInteger(CustomMessage=\"Please input a number larger than \" + str(min) + \": \")\r\n elif type(max) is int:\r\n while number > max:\r\n number = enterInteger(CustomMessage=\"Please input a number smaller than \" + str(max) + \": \")\r\n\r\n return number", "def validate(prompt, char_type, case):\n if char_type == 'A' and case == \"U\":\n while True:\n user_input = input(prompt).upper()\n try:\n if len(user_input) > 245:\n print(f'\\n.............\\n'\n f'Invalid input you entered {len(user_input)} characters\\n'\n f'Character limit is 245.\\n')\n elif user_input.replace(\" \", \"\").isalpha():\n return user_input\n print(\"\\n.............\\n\"\n \"Invalid input, non letter character.\\n\")\n except (ValueError, TypeError):\n print(\"\\n.............\\n\"\n \"Invalid input, non letter character.\\n\")\n elif char_type == 'I':\n while True:\n user_input = input(prompt)\n try:\n if 26 > int(user_input) > 0:\n return int(user_input)\n print(\"\\n.............\\n\"\n \"Invalid input, outside range of 1-25.\\n\")\n except (ValueError, TypeError):\n print(\"\\n.............\\n\"\n \"Invalid input, not a number.\\n\")", "def validate_correct_hint(self):\n is_response_hint_valid = False\n while is_response_hint_valid is False:\n hint_value = self.ask_user_input(\"Enter maximum hint threshold\")\n if not hint_value.isdigit():\n print(\"Not a number, please try again\")\n elif 0 <= int(hint_value) <= 81:\n is_response_hint_valid = True\n self.current_response = hint_value\n else:\n print(\"Number is out of the valid range, please try again\")\n return is_response_hint_valid", "def number_len(password_length):\r\n while True:\r\n numb_length = input('How much numbers you want in password? 
At least 1 : ')\r\n try:\r\n numb_length = int(numb_length)\r\n if 1 <= numb_length <= (password_length - 2):\r\n break\r\n else:\r\n print('{} is not in range'.format(numb_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(numb_length))\r\n return numb_length", "def prompt_number(prompt, low_limit = 1, high_limit = 65535):\n while True:\n try:\n response = int(prompt_base(prompt))\n if low_limit <= response <= high_limit:\n return response\n except:\n pass", "def integer_input( min_value=0, max_value=999, default=0, \n prompt=\"please type number and press ENTER\"):\n while True:\n raw = input(prompt)\n if not raw.isdigit():\n print(\"please enter a number\")\n continue\n raw = int(raw)\n if min_value <= raw <= max_value:\n return raw\n print(\"please enter value between {} and {}\".format(min_value,\n max_value))", "def prompt_user():\n print()\n while True:\n print('Please choose one of the following options:')\n print(\"1: Send a Thank You\")\n print(\"2: Create a report\")\n print(\"3: Send letters to everyone\")\n print(\"4: Match donations\")\n print(\"5: Quit\")\n try:\n return int(input(\"Option: \"))\n except ValueError as e:\n print(\"***INVALID Option Selected***\")", "def get_employee_input_int(message):\n while True:\n user_input = input('{}: '.format(message))\n\n # Type validation\n try:\n number = int(user_input)\n break\n except ValueError:\n print('You must enter a whole number.')\n continue\n\n #Range Validation\n # if valid_range and number not in valid_range:\n # _min = min(valid_range)\n # _max = max(valid_range)\n # print('You must enter a number from {} to {}.'.format(_min, _max))\n # continue\n return number", "def confirm():\n end_loop = False\n while not end_loop:\n confirmation = input(\"\"\"Would you like to continue with your choice?\n[1] No [2] Yes\nEnter a number please: \"\"\")\n if not confirmation or confirmation.isspace():\n print(\"You have not entered anything!\")\n try_again()\n elif confirmation.isnumeric() == True:\n if 0 < int(confirmation) < 3:\n if int(confirmation) == 1:\n confirmation = False\n return confirmation\n else:\n confirmation = True\n return confirmation\n end_loop = True\n else:\n print(\"You have not entered a valid number. Please enter a number between 1 and 2.\")\n else:\n print(\"Please enter a number only.\")\n try_again()", "def get_integer(prompt: str, error_prompt: str, limits_prompt: str, min_num: int = -float('inf'),\n max_num: int = float('inf')) -> int:\n while True:\n try:\n integer = int(input(prompt))\n if max_num >= integer >= min_num:\n return integer\n print(limits_prompt)\n except ValueError:\n print(error_prompt)", "def clean_input(prompt='Error'): # A special input function that will reject a\r\n # user's input of text when a number is requested -- if no prompt is\r\n # specified in the program, it will display \"Error\"\r\n text = True\r\n phrase = '0'\r\n while text:\r\n phrase = input(prompt + '\\n')\r\n try: # Adapted from an example in the ThinkPython textbook (15.7) -\r\n # Checks whether the input is a number, positive or negative. 
If\r\n # not, rejects the input and user gets to try again\r\n float(phrase)\r\n text = False\r\n except ValueError:\r\n print(\"Error: Non-Numeric Entry Detected\")\r\n # if phrase.isnumeric(): # Checks for a positive number (negative\r\n # rejected as well as text) - replaced with superior form from textbook\r\n # example\r\n # return float(phrase) # Return the number the user entered\r\n # else:\r\n # print(\"Error: Non-Numeric Entry Detected\")\r\n return float(phrase) # Return the number the user entered\r", "def not_number_rejector(message):\n actual_number = False\n\n while not actual_number:\n guess = str(input(message))\n if guess.isdigit():\n actual_number = True\n return int(guess)\n else:\n print(\"Not a number\")", "def check_user_input_if_integer(user_input):\n integer_input = ''\n while not integer_input:\n try:\n integer_input = int(user_input)\n except ValueError:\n logging.warn('only integer number accepted')\n user_input = input('enter a number: ')\n\n return integer_input", "def get_number():\n\n while True:\n user_number_str = input('Digite um número para saber o seu fatorial: ').strip()\n\n if user_number_str.isnumeric():\n return int(user_number_str)\n else:\n print('Valor inválido.')", "def get_input():\n\n end_loop = True # Used to stop the loop for user input\n while end_loop:\n try:\n user_input = str(float(input(\"Please enter a number: \")))\n end_loop = False # The loop breaks once the user has entered valid input\n except():\n print(\"Invalid input, please try again.\")\n\n return user_input", "def pwd_len():\r\n while True:\r\n password_length = input('How much length for password u want ? Minimum length is 6 and Maximum length is 25 : ')\r\n try:\r\n password_length = int(password_length)\r\n if 6 <= password_length <= 25:\r\n break\r\n else:\r\n print('{} is not in range'.format(password_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(password_length))\r\n return password_length", "def get_input():\n numb = int(input(\"Enter a number 1-10 \"))\n while True:\n if numb > 0 and numb < 10:\n return(numb)\n else:\n return(\"Please enter a value 1-10\")", "def get_input(msg):#function which catches all user input which is invalid (not numbers) for all the shapes\n value = None\n while not value:\n value = input(msg)\n if not value.isnumeric():#if not a valid number print the following message \n print(\"Please enter a valid number\")\n value = None\n else:\n return int(value)#once a correct number is entered the number is returned and program contiues ", "def number_format(num):\n while True:\n try:\n user_input = float(input(num))\n return user_input\n except ValueError:\n print(\"Error. Please enter the desired number. You may use \"\n \"decimals.\")\n except:\n print(\"Error: unknown.\")", "def prompt_int(prompt):\n while True:\n try:\n return int(input(prompt))\n except ValueError as e:\n print('Provide an integer')", "def validation_method(input_value):\r\n while True:\r\n try:\r\n valor = float(input(input_value))\r\n return valor\r\n except ValueError:\r\n print(\" ingresa un número\")", "def maximum():\n if len(a_variable.get()) > MAX_CHARACTERS:\n messagebox.showwarning(title=\"Max Characters Exceeded!\",\n message=\"Please enter no more than 25\\n\"\n \"characters, thanks.\")\n clear_box() # Clears the entry field" ]
[ "0.6210186", "0.60945845", "0.6090262", "0.59687257", "0.5932961", "0.5927148", "0.59168947", "0.58883595", "0.5841568", "0.58019143", "0.5782949", "0.5775905", "0.5761319", "0.5716825", "0.57102144", "0.5705627", "0.5701563", "0.5696017", "0.56239253", "0.5619944", "0.5563028", "0.55464673", "0.55456233", "0.5532081", "0.55233353", "0.5516909", "0.5509924", "0.5505276", "0.5493249", "0.5482043" ]
0.83368486
0
This function returns True if userInput can be converted to a number and returns False if it cannot.
def CheckNumber(userInput): try: float(userInput) return True except(ValueError): return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_number(value_if_allowed):\n if value_if_allowed == '':\n return True\n try:\n float(value_if_allowed)\n return True\n except ValueError:\n return False", "def __checkInput(self, var):\n try:\n int(var)\n\n except:\n return False\n\n else:\n return True", "def check_if_input_is_int(self):\n try:\n int(self.input)\n except ValueError:\n return False\n else:\n return True", "def is_number(num):\n try:\n float(num)\n return True\n except ValueError:\n return False", "def is_number(value):\n try:\n int(value)\n return True\n except (ValueError, TypeError):\n return False", "def is_num(var):\n try:\n int(var)\n return True\n except ValueError:\n return False", "def isnumber(x):\n try:\n float(x)\n return True\n except ValueError:\n return False", "def is_number(number):\n try:\n float(number)\n return True\n except ValueError:\n return False", "def _is_number(value):\n try:\n float(value)\n return True\n except (TypeError, ValueError):\n return False", "def isNumber(num):\n try:\n abs(num)\n return True\n except:\n return False", "def is_number(n):\n\ttry:\n\t\tfloat(n)\n\t\treturn True\n\texcept ValueError:\n\t\treturn False", "def is_digit(user_input):\n # If any characters is digit return boolean True else False\n if any(char.isdigit() for char in user_input):\n return True\n return False", "def check_user_input_if_integer(user_input):\n integer_input = ''\n while not integer_input:\n try:\n integer_input = int(user_input)\n except ValueError:\n logging.warn('only integer number accepted')\n user_input = input('enter a number: ')\n\n return integer_input", "def could_be_number(val):\n if val == None:\n return False\n\n if isinstance(val, (float, int, long)):\n return True\n\n # allow coercion from str\n if isinstance(val, (str, unicode)):\n try:\n n = float(val)\n if not isinstance(n, float):\n raise ValueError\n else:\n return True\n except:\n return False\n\n #otherwise\n return False", "def is_number(self) -> bool:\n return False", "def validate_number(input_data):\n if input_data.startswith('-'):\n return input_data.i\n else:\n return False", "def is_number(value):\n try:\n float(value)\n return True\n except ValueError:\n return False", "def checkifnumber(self, test_string):\r\n try:\r\n float(test_string)\r\n return(True)\r\n except ValueError:\r\n return(False)", "def check_for_integer(number):\r\n \r\n try:\r\n int(number) \r\n return True\r\n except ValueError:\r\n return False", "def is_number(self,val):\n try:\n float(val)\n return True\n except ValueError:\n return False", "def verify_valid_num(self, user_num):\r\n if not self.range_between_0_and_9(user_num):\r\n print(\"\\033[1;31mJust what do you think you're doing, Dave? 
Choose a number between 0 and 8\\033[0m\")\r\n return False\r\n\r\n return True", "def validate_answer(answer):\r\n try:\r\n float(answer)\r\n return True\r\n except ValueError:\r\n return False", "def _is_number(s) -> bool:\n try:\n float(s)\n except ValueError:\n return False\n else:\n return True", "def is_number(x):\n if isinstance(x, (int, float)):\n return True\n else:\n return False", "def isnum(value):\n\n try:\n return bool(isinstance(value, (float, int)))\n except RuntimeError:\n return False", "def validation_method(input_value):\r\n while True:\r\n try:\r\n valor = float(input(input_value))\r\n return valor\r\n except ValueError:\r\n print(\" ingresa un número\")", "def checkNumberInt(value):\n if value.isnumeric():\n return int(value)\n else:\n print(\"You did not enter the correct numbers!\")\n newNum = input(\"Please enter a number: \")\n return checkNumberInt(newNum)", "def _check_message_is_number(message):\n try:\n float(message)\n return True\n except ValueError:\n return False", "def is_number(n):\n return isinstance(n, (int, float))", "def __has_numbers(self, input_string):\n return bool(re.search(r'\\d', input_string))" ]
[ "0.7273646", "0.710573", "0.70342", "0.700277", "0.6989638", "0.69800496", "0.6931918", "0.68965924", "0.68937594", "0.68852633", "0.6882689", "0.68496823", "0.6842476", "0.6823968", "0.68192196", "0.68040407", "0.679756", "0.6784865", "0.67602575", "0.6758838", "0.673784", "0.67236334", "0.67183536", "0.6688678", "0.6659615", "0.66254634", "0.6623485", "0.66204065", "0.6614409", "0.66125953" ]
0.86746126
0
This function prompts the user for a date using the message variable. User will continue to be prompted until the format is correct. The date format is very specific in the format DD/MM/YYYYY This function will confirm there are the right number of characters, the / are in the right place, the input are numbers, the days are between 1 and 31, the months are between 1 and 12, and the year is between 2000 and 3000 (roll on year 3k bug!)
def DateInput(message): askAgainMessage = "The date must be in the format DD/MM/YYYY" keepAsking = True while keepAsking: answer = input(message) # First we check if there are two / by splitting using / and looking # for 3 items in the returned list. dateCheck = answer.split(sep="/") if len(dateCheck) is not 3: print(askAgainMessage) else: # If all is order, we can assign the 3 items to day, month, year day = dateCheck[0] month = dateCheck[1] year = dateCheck[2] # Next we check each item has the right amount of characters # and they can all be converted into numbers. if (len(day) == 2 and len(month) == 2 and len(year) == 4 and CheckNumber(day) and CheckNumber(month) and CheckNumber(year)): day = int(day) month = int(month) year = int(year) if (day > 0 and day < 32 and month > 0 and month < 13 and year > 2000 and year < 3000): keepAsking = False else: print(askAgainMessage) else: print(askAgainMessage) return answer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_date(message, param):\n while True:\n try:\n day, month, year = input(message).split(param)\n return str(datetime.datetime(int(year), int(month), int(day)).strftime(\"%d/%m/%Y\"))\n except ValueError:\n continue", "def enter_date():\n valid_data = False\n # used to keep track of the values and change them in other scopes\n input_data = {'date': ''}\n\n while not valid_data:\n input_data['date'] = get_input(\"Date of the task\" + \"\\n\" + \"Please use DD/MM/YYYY format: \")\n if re.match('\\d{2}/\\d{2}/\\d{4}', input_data['date']):\n try:\n datetime.datetime.strptime(input_data['date'], '%d/%m/%Y')\n except ValueError:\n clean_scr()\n get_input(\"Enter a valid date. Press enter to try again.\")\n else:\n valid_data = True\n clean_scr()\n\n return input_data['date']", "def validate_input(date_string):\n #I decided to make sure the input was valid by checking each individual piece. I did this by splitting the input string by the dashes.\n #I checked first that the month value was between 1 and 12. I then checked depending on the month if the day value was valid.\n #I also made sure to check that the year was greater than 1000.\n #For February, I made a specific check for if it was a leap year or not. If the year inputted is not a leap year and the user entered\n #29 as the day value, it throws an error. Finally, once all values are checked and are valid, they are put into a tuple.\n splitdate = date_string.split(\"-\")\n if splitdate[0] != '' and splitdate[1] != '' and splitdate[2] != '':\n if int(splitdate[0]) >= 1 and int(splitdate[0]) <= 12:\n if int(splitdate[0]) == 1 or int(splitdate[0]) == 3 or int(splitdate[0]) == 5 or int(splitdate[0]) == 7 or int(splitdate[0]) == 8 or int(splitdate[0]) == 10 or int(splitdate[0]) == 12:\n if int(splitdate[1]) >= 1 and int(splitdate[1]) <= 31:\n if int(splitdate[2]) >= 1000:\n date = (int(splitdate[0]), int(splitdate[1]), int(splitdate[2]))\n return date\n elif int(splitdate[0]) == 4 or int(splitdate[0]) == 6 or int(splitdate[0]) == 9 or int(splitdate[0]) == 11:\n if int(splitdate[1]) >= 1 and int(splitdate[1]) <= 30:\n if int(splitdate[2]) >= 1000:\n date = (int(splitdate[0]), int(splitdate[1]), int(splitdate[2]))\n return date\n elif int(splitdate[0]) == 2:\n if int(splitdate[2]) % 4 == 0 or int(splitdate[2]) % 1000 == 0:\n if int(splitdate[1]) >= 1 and int(splitdate[1]) <= 29:\n if int(splitdate[2]) >= 1000:\n date = (int(splitdate[0]), int(splitdate[1]), int(splitdate[2]))\n return date\n elif int(splitdate[1]) >= 1 and int(splitdate[1]) <= 28:\n if int(splitdate[2]) >= 1000:\n date = (int(splitdate[0]), int(splitdate[1]), int(splitdate[2]))\n return date\n return None", "def get_date(text=\"\"):\n clear()\n date = input(\"Enter {}date (Format:YYYY-MM-DD): \".format(text))\n try:\n datetime.datetime.strptime(date, \"%Y-%m-%d\")\n except ValueError:\n input(\"Please enter date in this format: YYYY-MM-DD.\"\n \" Press enter to continue.\")\n return get_date()\n else:\n return date", "def is_valid_date(date):\n\n try:\n parse(date)\n return date\n except:\n new_date = raw_input(\"Invalid date, try again: YYYY-MM-DD \")\n return is_valid_date(new_date)", "def date_format(date):\n\n formatted = True\n task_date = date\n while formatted:\n try:\n datetime.datetime.strptime(task_date, \"%m/%d/%Y\")\n formatted = False\n clear()\n except ValueError:\n clear()\n task_date = input(\n \"Sorry. That is not a valid date. 
Please enter a date \"\n \"in the MM/DD/YYYY format: \\n>\")\n\n return task_date", "def get_date(custom_text):\n fmt = '%m/%d/%Y'\n while True:\n clear()\n print(\"Date Format: month/day/year --/--/----\\n\")\n print(\"{}\\n\".format(custom_text))\n task_date = input(\"Please input a date: \")\n try:\n datetime.datetime.strptime(task_date, fmt)\n except ValueError:\n print(\"'{}' doesn't seem to be a valid date.\".format(task_date))\n input(\"Press Enter\")\n except AttributeError:\n print(\"'{}' doesn't seem to be a valid date.\".format(task_date))\n input(\"Press Enter\")\n else:\n return datetime.datetime.strptime(task_date, fmt).date()\n break", "def ex8() :\r\n print(\" - Date Calculator - \")\r\n import datetime\r\n today = datetime.date.today()\r\n print(today)\r\n try : #try catch method, in case user enters non-date, or 31st Feb etc.\r\n userDate = input(\"Please enter the date to check in a dd/mm/yy format: \") #userDate is string\r\n userDate = datetime.datetime.strptime(userDate, '%d/%m/%Y').date() #userDate is date_object\r\n if userDate < today : print(\"Invalid input, date is in the past\")\r\n elif userDate == today: print(\"That's today you dum-dum, answer is 0 days.\")\r\n else:\r\n delta = userDate - today #calculate difference\r\n delta = str(delta) #date_object don't work with split only str\r\n delta = delta.split(\",\") #unorthodox method to delete time (0:00:0) from the days\r\n print(\"The number of days between today (\",today,\") and entered date (\",userDate,\") are \",delta[0],\".\")\r\n except ValueError as e :\r\n print(\"Not a valid date.\")", "def date():\r\n while True:\r\n clear()\r\n task_date = input(\"When was this task performed? Date format: dd-mm-yyyy \\n > \").strip()\r\n try:\r\n task_date = datetime.datetime.strptime(task_date, \"%d-%m-%Y\")\r\n if task_date.date() > datetime.datetime.today().date():\r\n\r\n input(\" Sorry, date can't be later than today's date. Press enter and provide a correct date \")\r\n continue\r\n\r\n except ValueError:\r\n input(\" Sorry, not a valid date. Press enter and provide a correct date... 
\")\r\n continue\r\n\r\n except Exception: \r\n raise(\"Something went wrong.\")\r\n input(\"Press enter to continue...\")\r\n continue \r\n\r\n else:\r\n return task_date.strftime(\"%d-%m-%Y\")", "def getdatefromuser():\n date_str = raw_input(\"Enter the date cutoff in mm/dd/yyyy format: \")\n date_parts = re.split('[-/]', date_str)\n return date(*[int(elt) for elt in [date_parts[2], date_parts[0], date_parts[1]]])", "def check_date(date, logger):\n logger.info('Checking the entered date...')\n try:\n (datetime.datetime.strptime(date, '%Y%m%d')).date()\n return True\n except Exception:\n raise SystemExit('Please, enter the date in \"YYYYMMDD\" format')", "def exact_date(self):\n print(\"Exact Date Search\")\n date_string = input(\"Enter a date in the format DD/MM/YYYY> \")\n return date_string", "def validate(self, string, pos):\n res, string, pos = super().validate(string, pos)\n\n if res == 0:\n if string[pos-1].isdigit() and string.count('/') < 2:\n string = string[0:pos-1] + \"/\" + string[pos-1]\n res = 1\n pos = pos+1\n\n\n # get the mo/da/yr as array\n date = string.split(\"/\")\n\n # init to something that cannot be entered\n mo = da = yr = \"-1\"\n\n # update vars based on array\n try:\n mo = date[0]\n da = date[1]\n yr = date[2]\n except:\n pass\n\n # change arr len of date based on overwritten values\n if mo == \"-1\":\n date = []\n elif da == \"-1\":\n date = date[:1]\n elif yr == \"-1\":\n date = date[:2]\n\n # if entering date that doesnt exist, stop it\n try:\n if int(mo) > 12:\n date[0] = \"1\"\n pos -= 1\n if int(da) > 31:\n date[1] = \"3\"\n pos -= 1\n\n except Exception as e:\n pass\n\n # reform the date\n string = '/'.join(date)\n\n return res, string, pos", "def search_date(self, text='date'):\n\n date = input(f\"\\nEnter a {text} (MM-DD-YYYY): \")\n date_obj = datetime.strptime(date, \"%m-%d-%Y\")\n\n try:\n date = datetime.strftime(date_obj, \"%m-%d-%Y\")\n return date\n except ValueError:\n input(\"\\nFormat of date must be MM-DD-YYYY\\n\")\n return self.search_date()", "def validate_date_format_yyy_mm_dd(date_text):\n try:\n datetime.datetime.strptime(date_text, '%Y-%m-%d')\n except ValueError:\n raise ValueError(\"Incorrect data format, should be YYYY-MM-DD\")", "def get_user_input():\n # Gets user input in M\\nD\\nYYYY format for the start date\n start_instrings = [\"Enter start month: \",\n \"Enter start day: \", \"Enter start year: \"]\n raw_start_date = tuple(input(s) for s in start_instrings)\n # Gets user input in M\\nD\\nYYYY format for the end date\n end_instrings = [\"Enter end month: \",\n \"Enter end day: \", \"Enter end year: \"]\n raw_end_date = tuple(input(s) for s in end_instrings)\n\n # Uses map to convert string input to integers and stores the values in a tuple\n start_date = tuple(map(int, raw_start_date))\n end_date = tuple(map(int, raw_end_date))\n\n # Checks if each year is within the date limit\n if not(1971 <= start_date[2] <= 2020 and 1971 <= end_date[2] <= 2020):\n raise Exception(\"Input date/s outside date limit.\")\n\n # Cyclic rotation of elements (because I really really **really** want to unpack)\n # Source: https://www.geeksforgeeks.org/python-shift-last-element-to-first-position-in-list/\n start_date, end_date = start_date[-1:] + \\\n start_date[:-1], end_date[-1:] + end_date[:-1]\n\n # As you can see unpacking makes the line smaller and more readable\n # return DateRange(datetime.date(start_date[2], start_date[0], start_date[1]), datetime.date(end_date[2], end_date[0], end_date[1]))\n return DateRange(datetime.date(*start_date), 
datetime.date(*end_date))", "def input_date(self, date_attr):\r\n try:\r\n date = input(\"Entrez la \" + date_attr + \"(JJ/MM/AAAA): \")\r\n datetime.datetime.strptime(date, '%d/%m/%Y')\r\n return date\r\n except ValueError:\r\n print(\"Erreur de saisie de la date (format JJ/MM/AAAA)\")\r\n return self.input_date(date_attr)", "def chkDate(stdin):\n # return \"Y\" if dateCheck(stdin) else \"N\"\n return run(\"./chkdate\", [], stdin)[1].strip()", "def main():\n ## The standard way to get arguments from the command line, \n ## make sure they are the right type, and print help messages\n parser = argparse.ArgumentParser(description=\"Compute days from yyyy-mm-dd to next mm-dd.\")\n parser.add_argument('year', type=int, help=\"Start year, between 1800 and 2500\")\n parser.add_argument('start_month', type=int, help=\"Starting month, integer 1..12\")\n parser.add_argument('start_day', type=int, help=\"Starting day, integer 1..31\")\n parser.add_argument('end_month', type=int, help=\"Ending month, integer 1..12\")\n parser.add_argument('end_day', type=int, help=\"Ending day, integer 1..12\")\n args = parser.parse_args() # will get arguments from command line and validate them\n year = args.year\n start_month = args.start_month\n start_day = args.start_day\n end_month = args.end_month\n end_day = args.end_day\n \n print(\"Checking date \", str(year) + \"/\" + str(start_month) + \"/\" + str(start_day))\n \n\n if not is_valid(year, start_month, start_day) : \n sys.exit(\"Must start on a valid date between 1800 and 2500\")\n if not is_valid(2000, end_month, end_day):\n sys.exit(\"Ending month and day must be part of a valid date\")\n count_days(year,start_month,start_day,end_month,end_day)", "def valid_date(input_date):\n try:\n input_dt = dt.datetime.strptime(input_date, \"%Y-%m-%d\")\n return input_date\n except ValueError:\n msg = \"Not a valid date: '{0}'.\".format(input_date)\n raise argparse.ArgumentTypeError(msg)", "def check_dateformat(date_field, date_format='YYYY-MM-DD'):\r\n if not date_format or not date_field:\r\n return None\r\n # format = \"%Y-%m-d\"\r\n date_field = date_field.strip()\r\n\r\n try:\r\n dd = None\r\n mm = None\r\n yyyy = None\r\n seperator = '-'\r\n date_part = date_field\r\n time_part = None\r\n if '/' in date_field:\r\n seperator = '/'\r\n if ' ' in date_field:\r\n (date_part, time_part) = date_field.split(' ')\r\n\r\n if not time_part:\r\n if date_format == 'DD-MM-YYYY' or date_format == 'DD/MM/YYYY':\r\n (dd, mm, yyyy) = date_part.split(seperator)\r\n elif date_format == 'YYYY-MM-DD' or date_format == 'YYYY/MM/DD':\r\n (yyyy, mm, dd) = date_part.split(seperator)\r\n elif date_format == 'YYYY-DD-MM' or date_format == 'YYYY/DD/MM':\r\n (yyyy, dd, mm) = date_part.split(seperator)\r\n yyyy = int(yyyy)\r\n dd = int(dd)\r\n mm = int(mm)\r\n date_part = date(yyyy, mm, dd)\r\n return date_part\r\n else:\r\n raise SIDException(\r\n 'Invalid Date: datetime not supported', 'datetime')\r\n # to support further \"%d/%m/%Y %H:%M:%S\"\r\n\r\n # date_string = str(yyyy) + '-' + str(mm) + '-' + str(dd)\r\n # return datetime.strptime(date_string, format)\r\n\r\n except Exception:\r\n raise SIDException('Invalid Date', 'check_dateformat')", "def read_day():\n\twhile True:\n\t\t_day = input(\"Introduceti ziua: \")\n\t\ttry:\n\t\t\t_day = int(_day)\n\t\t\tif (not is_in_range(_day, 0, VALID_DAY)):\n\t\t\t\tprint(\"Ziua invalida.\")\n\t\t\telse:\n\t\t\t\tbreak\n\t\texcept ValueError:\n\t\t\tprint(\"Ziua invalida, introduceti un intreg.\")\n\treturn (_day)", "def get_date(prompt, title, 
min_date, max_date):\r\n question = prompt + ' Please select the year:'\r\n choices = [i for i in range(min_date.year, max_date.year + 1)]\r\n year = e.choicebox(question, title, choices)\r\n if year == None:\r\n raise QuitError\r\n else:\r\n year = int(year)\r\n question = 'Please select the month:'\r\n choices = [('0' + str(i))[-2:] for i in range(1, 13)]\r\n if min_date.year == max_date.year:\r\n choices = choices[min_date.month - 1: max_date.month]\r\n elif year == min_date.year:\r\n choices = choices[min_date.month - 1:]\r\n elif year == max_date.year:\r\n choices = choices[:max_date.month]\r\n month = e.choicebox(question, title, choices)\r\n if month == None:\r\n raise QuitError\r\n else:\r\n month = int(month)\r\n question = 'Please select the day:'\r\n month_length = c.monthrange(year, month)[1]\r\n choices = [('0' + str(i))[-2:] for i in range(1, month_length + 1)]\r\n if (min_date.year, min_date.month) == (max_date.year, max_date.month):\r\n choices = choices[min_date.day - 1: max_date.day]\r\n elif (year, month) == (min_date.year, min_date.month):\r\n choices = choices[min_date.day - 1:]\r\n elif (year, month) == (max_date.year, max_date.month):\r\n choices = choices[:max_date.day]\r\n day = e.choicebox(question, title, choices)\r\n if day == None:\r\n raise QuitError\r\n else:\r\n day = int(day)\r\n return d.date(year, month, day)", "def valid_date(date_string):\n date_string_number = re.sub('\\D', '', date_string)\n try:\n date_res = datetime.strptime(date_string_number, '%Y%m%d').date()\n except ValueError:\n print(\"Not a valid date: '{}'.\".format(date_string))\n else:\n return date_res", "def parse_date(input):\n input = input.strip()\n if input == '':\n return None, None\n\n # Parse the start\n mo = yyyymmdd_re.match(input)\n if not mo:\n mo = yyyymmdd_hyphen_re.match(input)\n if not mo:\n mo = ddmmyyyy_re.match(input)\n if not mo:\n mo = ddmmyyyy_hyphen_re.match(input)\n if mo:\n start = Date(*map(lambda x: x and int(x), (mo.group('year'), mo.group('month'), mo.group('day'))))\n else:\n return None, 'N'\n\n\n # Check if we're at the end of the input\n pos = mo.end()\n if pos == len(input):\n return DateRange(start, start), None\n\n # Check for a range specifier\n mo = range_re.match(input, pos)\n if mo:\n pos = mo.end()\n else:\n return DateRange(start, start), 'T'\n\n # Parse the end date\n mo = yyyymmdd_re.match(input, pos)\n if not mo:\n mo = yyyymmdd_hyphen_re.match(input, pos)\n if not mo:\n mo = ddmmyyyy_re.match(input, pos)\n if not mo:\n mo = ddmmyyyy_hyphen_re.match(input, pos)\n if mo:\n end = Date(*map(lambda x: x and int(x), (mo.group('year'), mo.group('month'), mo.group('day'))))\n else:\n return DateRange(start, start), 'T'\n\n pos = mo.end()\n if pos == len(input):\n return DateRange(start, end), None\n return DateRange(start, end), 'T'", "def check_date(date):\n import datetime\n correctDate = None\n date = str(date)\n \n if (len(date)!=8):\n return False\n year = int(date[0:4])\n month = int(date[4:6])\n day = int(date[6:8])\n try:\n datetime.datetime(year,month,day)\n correctDate = True\n except ValueError:\n correctDate = False\n return correctDate", "def date_datetime():\n date = input(\"give date in mon/day/year format(month like jan feb): \")\n return datetime.datetime.strptime(date, \"%b/%d/%Y\")", "def valid_date(date):\n import datetime\n try:\n datetime.datetime.strptime(date, '%Y-%m-%d')\n except ValueError:\n raise ValueError(\"Incorrect data format, should be YYYY-MM-DD\")", "def checkdate_re(name, val):\n mnames = calendar.month_name + 
calendar.month_abbr\n mat = _slash.match(val)\n if mat is not None:\n\tif string.capitalize(mat.group(1)) in mnames:\n\t return\n\ttry:\n\t x = string.atoi(mat.group(1))\n\texcept ValueError:\n\t raise ValidationError, \\\n\t\t 'parameter \"%s\", value \"%s\" does not look like a date' % \\\n\t\t (name, val)\n mat = _amer.match(val)\n if (mat is not None and\n\tstring.capitalize(mat.group(1)) in mnames):\n\treturn\n mat = _euro.match(val)\n if (mat is not None and\n\tstring.capitalize(mat.group(2)) in mnames):\n\treturn\n raise ValidationError, \\\n\t 'parameter \"%s\", value \"%s\" does not look like a date' % \\\n\t (name, val)", "def checkdate_regex(name, val):\n mnames = calendar.month_name + calendar.month_abbr\n if _slash.match(val) != -1:\n\tif string.capitalize(_slash.group(1)) in mnames:\n\t return\n\ttry:\n\t x = string.atoi(_slash.group(1))\n\texcept ValueError:\n\t raise ValidationError, 'parameter \"%s\", value \"%s\" does not look like a date' % \\\n\t\t (name, val)\n if _amer.match(val) != -1 and string.capitalize(_amer.group(1)) in mnames:\n\treturn\n if _euro.match(val) != -1 and string.capitalize(_euro.group(2)) in mnames:\n\treturn\n raise ValidationError, 'parameter \"%s\", value \"%s\" does not look like a date' % \\\n\t (name, val)" ]
[ "0.7718624", "0.74047714", "0.7254131", "0.6943424", "0.681888", "0.6775938", "0.6571638", "0.65667284", "0.6505889", "0.6504818", "0.6378474", "0.6291643", "0.6249938", "0.61674076", "0.61129284", "0.61053425", "0.6080972", "0.6056492", "0.60186255", "0.5945199", "0.5924336", "0.58938646", "0.58560985", "0.58498544", "0.58328795", "0.58292073", "0.5808484", "0.5794331", "0.579351", "0.578675" ]
0.8515413
0
takes first row of tworow belief np array and converts it to dict indexed by label of positive beliefs
def np_to_belief(np_array,labels): return dict((l,np_array[0,i]) for i,l in enumerate(labels))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_to_original_labels(array, threshold=0.5, initialization_value=999):\r\n \r\n binarized, belief = get_binarized_and_belief(array=array, threshold=threshold)\r\n \r\n #sanity check\r\n if binarized.shape != belief.shape:\r\n raise ValueError('Sanity check did not pass.')\r\n \r\n # initialize with a crazy label we will be sure is gone in the end\r\n slice_all_but_last_channel = tuple([slice(None) for _ in array.shape[:-1]] + [0])\r\n original_labels = initialization_value * np.ones_like(array[slice_all_but_last_channel])\r\n \r\n # the outer keys correspond to the binarized values\r\n # the inner keys correspond to the order of indices comingn from argsort(ascending) on suspicion, i.e. \r\n # how far the binarized sigmoid outputs were from the original sigmoid outputs \r\n # for example, (2, 1, 0) means the suspicion from least to greatest was: 'WT', 'TC', 'ET'\r\n # (recall that the order of the last three channels is expected to be: 'ET', 'TC', and 'WT')\r\n mapper = {(0, 0, 0): 0, \r\n (1, 1, 1): 4,\r\n (0, 1, 1): 1,\r\n (0, 0, 1): 2,\r\n (0, 1, 0): {(2, 0, 1): 0,\r\n (2, 1, 0): 0, \r\n (1, 0, 2): 1,\r\n (1, 2, 0): 1,\r\n (0, 2, 1): 0,\r\n (0, 1, 2): 1}, \r\n (1, 1, 0): {(2, 0, 1): 0,\r\n (2, 1, 0): 0, \r\n (1, 0, 2): 4,\r\n (1, 2, 0): 4,\r\n (0, 2, 1): 4,\r\n (0, 1, 2): 4},\r\n (1, 0, 1): {(2, 0, 1): 4,\r\n (2, 1, 0): 2, \r\n (1, 0, 2): 2,\r\n (1, 2, 0): 2,\r\n (0, 2, 1): 4,\r\n (0, 1, 2): 4}, \r\n (1, 0, 0): {(2, 0, 1): 0,\r\n (2, 1, 0): 0, \r\n (1, 0, 2): 0,\r\n (1, 2, 0): 0,\r\n (0, 2, 1): 4,\r\n (0, 1, 2): 4}}\r\n \r\n \r\n \r\n done_replacing = False\r\n \r\n for binary_key, inner in mapper.items():\r\n mask1 = check_subarray(array1=binarized, array2=np.array(binary_key))\r\n if isinstance(inner, int):\r\n original_labels, done_replacing = replace_initializations(done_replacing=done_replacing, \r\n array=original_labels, \r\n mask=mask1, \r\n replacement_value=inner, \r\n initialization_value=initialization_value)\r\n else:\r\n for inner_key, inner_value in inner.items():\r\n mask2 = np.logical_and(mask1, check_subarray(array1=belief, array2=np.array(inner_key)))\r\n original_labels, done_replacing = replace_initializations(done_replacing=done_replacing,\r\n array=original_labels, \r\n mask=mask2, \r\n replacement_value=inner_value, \r\n initialization_value=initialization_value)\r\n \r\n if not done_replacing:\r\n raise ValueError('About to return so should have been done replacing but told otherwise.')\r\n \r\n return original_labels.astype(np.uint8)", "def create_label_map():\n\n cnt = 1\n tmp_array = np.array([10, 15, 25, 30, 40, 47, 57, 63, 69, 74, 81])\n dictionary = dict()\n dictionary[1] = 1\n for idx, val in enumerate(tmp_array):\n for j in range(cnt + 1, val):\n dictionary[j] = int(idx + 2)\n cnt = j\n return dictionary", "def elan_annotation_to_binary(annotation_data):\n label_dict = {}\n for annotation in annotation_data:\n label = 1 if annotation[2] == 'Engaged' else 0\n label_dict[\"{0},{1}\".format(annotation[0], annotation[1])] = label\n return label_dict", "def init_label_dict(num_classes):\n label_dict={}\n for i in range(num_classes):\n label_dict[i]=(0,0,0)\n return label_dict", "def feature_dict(sent, i):\n # WORK HERE!!\n return {}", "def field_labels(label_row, datum_row):\n return dict(zip(label_row, datum_row))", "def one_hot_vocab_encoding(w2vp: W2VPreprocessor \n ) -> Dict[str, np.ndarray]:\n return {\n w: i for i, w in enumerate(w2vp.vocabulary)\n }", "def coherent_subsequent_states(Infomap_labels):\r\n unique_labels= 
np.unique(Infomap_labels)\r\n dictionary= {}\r\n for i in range(len(unique_labels)):\r\n label_index=[]\r\n for j in range(len(Infomap_labels)):\r\n if unique_labels[i]==Infomap_labels[j]:\r\n label_index.append(j)\r\n subsequent=groupSequence(label_index)\r\n \r\n dictionary[i]=subsequent\r\n \r\n return dictionary", "def get_labels():\n return {\"contradiction\": 0, \"neutral\": 1, \"entailment\": 2}", "def make_represented_genders(metric_df, label_lang):\n return dict(metric_df[['bias_value', 'bias_label']].drop_duplicates().to_dict('split')['data'])", "def calculate_class_weights(label_data):\n neg, pos = np.bincount(label_data)\n weight_for_0 = 1 / neg\n weight_for_1 = 1 / pos\n return {0: weight_for_0, 1: weight_for_1}", "def finalLabels(self) -> Tuple[ndarray, Dict[str, int]]:\n test = set([])\n resultTwo: Dict[str, int] = {}\n result = np.empty((self.dataSize,), dtype=int)\n for i, cluster in enumerate(self.clusters):\n for prototypeIdx in cluster:\n prototypeHash = hashSequence(self.data[prototypeIdx])\n resultTwo[prototypeHash] = self.classes[i]\n result[prototypeIdx] = i\n test.add(prototypeIdx)\n for i, nonPrototypeIdx in enumerate(self._getNonPrototypeIndices(self.clusters)):\n nonPrototypeHash = hashSequence(self.data[nonPrototypeIdx])\n resultTwo[nonPrototypeHash] = self.classes[self.labels[i]]\n result[nonPrototypeIdx] = self.labels[i]\n test.add(nonPrototypeIdx)\n return result, resultTwo", "def index_dict(self):\n msk = self.load_mask()\n mski = enumerate(msk)\n ifiltered = (i for (i, m) in mski if m == 1)\n return {i: j for (j, i) in enumerate(ifiltered)}", "def index_dict(self):\n msk = self.load_mask()\n mski = enumerate(msk)\n ifiltered = (i for (i, m) in mski if m == 1)\n return {i: j for (j, i) in enumerate(ifiltered)}", "def featurize(vector,features):\n dictionary = collections.defaultdict(lambda:0)\n for feature in iter(set(features)):\n dictionary[feature] = [vector[key][feature] if feature in vector[key] else 0 for key in vector] #populates vectors with zeroes where there's no value in an industry for an n-gram.\n return dictionary", "def _get_classify_labels(df):\n labels = np.ones((len(df), 1), dtype=dtype) * 2\n labels[df['A-coref']] = 0\n labels[df['B-coref']] = 1\n return labels", "def get_label_map(labels):\n label_map = dict()\n for i,v in enumerate(np.ravel(labels.data)):\n if v in label_map.keys():\n label_map.get(v).append(i)\n else:\n label_map[v] = [i]\n return label_map", "def encode_ST_labels(labels):\n return np.array([1 if sentiment == 'bullish' else 0 for sentiment in labels])", "def get_training_labels():\n\n\tmapping = dict()\n\tmapping[constants.ASCause.apsp] = 0\n\tmapping[constants.ASCause.bl] = 1\n\tmapping[constants.ASCause.ce] = 2\n\tmapping[constants.ASCause.dfl] = 3\n\tmapping[constants.ASCause.lrssi] = 4\n\tmapping[constants.ASCause.pwr_state] = 5\n\treturn mapping", "def feature_sign_dict(three_feature_list):\n\n feature_dict = {}\n\n for i in list(range(1, 11)):\n feature_dict[-i] = three_feature_list[0]\n\n feature_dict[0] = three_feature_list[1]\n\n for i in list(range(1, 11)):\n feature_dict[i] = three_feature_list[2]\n\n return feature_dict", "def get_mappings():\n original_dict = ClassifierDataset.get_labels()\n return dict(zip(original_dict.values(), original_dict.keys()))", "def process_label(intents, w2v,class_id_startpoint=0):\n class_dict = {}\n label_vec = []\n class_id = class_id_startpoint\n \n for line in intents:\n # check whether all the words in w2v dict\n line=line[0]\n label = line.split(' ')\n for w in label:\n 
if not w in w2v.vocab:\n print('not in w2v dict', w)\n\n # compute label vec\n label_sum = np.sum([w2v[w] for w in label], axis = 0)\n label_vec.append(label_sum)\n # store class names => index\n class_dict[' '.join(label)] = class_id\n class_id = class_id + 1\n #print('=====label vec', label_vec)\n return class_dict, np.asarray(label_vec)", "def one_hot_encode(label, label_values):\n semantic_map = []\n for colour in label_values:\n equality = np.equal(label, colour)\n class_map = np.all(equality, axis = -1)\n semantic_map.append(class_map)\n semantic_map = np.stack(semantic_map, axis=-1)\n\n return semantic_map", "def process_label(self, foreground_labels):\n # Find the unique (nonnegative) foreground_labels, map them to {0, ..., K-1}\n unique_nonnegative_indices = np.unique(foreground_labels)\n mapped_labels = foreground_labels.copy()\n for k in range(unique_nonnegative_indices.shape[0]):\n mapped_labels[foreground_labels == unique_nonnegative_indices[k]] = k\n foreground_labels = mapped_labels\n return foreground_labels", "def one_hot_encoding(sequence):\n\n mydict = {\n \"A\": np.asarray([1, 0, 0, 0]),\n \"a\": np.asarray([1, 0, 0, 0]),\n \"C\": np.asarray([0, 1, 0, 0]),\n \"c\": np.asarray([0, 1, 0, 0]),\n \"G\": np.asarray([0, 0, 1, 0]),\n \"g\": np.asarray([0, 0, 1, 0]),\n \"T\": np.asarray([0, 0, 0, 1]),\n \"t\": np.asarray([0, 0, 0, 1]),\n \"Y\": np.asarray([0, 1, 0, 1]),\n \"y\": np.asarray([0, 1, 0, 1]),\n \"R\": np.asarray([1, 0, 1, 0]),\n \"r\": np.asarray([1, 0, 1, 0]),\n \"S\": np.asarray([0, 1, 1, 0]),\n \"s\": np.asarray([0, 1, 1, 0]),\n \"W\": np.asarray([1, 0, 0, 1]),\n \"w\": np.asarray([1, 0, 0, 1]),\n \"K\": np.asarray([0, 0, 1, 1]),\n \"k\": np.asarray([0, 0, 1, 1]),\n \"M\": np.asarray([1, 1, 0, 0]),\n \"m\": np.asarray([1, 1, 0, 0]),\n \"B\": np.asarray([0, 1, 1, 1]),\n \"b\": np.asarray([0, 1, 1, 1]),\n \"D\": np.asarray([1, 0, 1, 1]),\n \"d\": np.asarray([1, 0, 1, 1]),\n \"H\": np.asarray([1, 1, 0, 1]),\n \"h\": np.asarray([1, 1, 0, 1]),\n \"V\": np.asarray([1, 1, 1, 0]),\n \"v\": np.asarray([1, 1, 1, 0]),\n \"N\": np.asarray([0, 0, 0, 0]),\n \"n\": np.asarray([0, 0, 0, 0]),\n \"-\": np.asarray([0, 0, 0, 0]),\n }\n print(f\"Seq: {sequence}\")\n if len(sequence) > 0:\n nuc_list = list()\n for nuc in list(sequence):\n nuc_list.append(mydict[nuc])\n result = np.stack(np.asarray(nuc_list, dtype=\"int8\"))\n return result\n else: \n print(\"ERROR! 
sequence is too short\")", "def labels_b(self):\n return self._labels_b", "def predict(self, row):\n label_vote = dict()\n for i in range(len(self.forest)):\n result = self.forest[i].predict(row)\n label = max(result, key=result.get)\n \n if label_vote.get(label, None) is None:\n label_vote[label] = 0\n\n label_vote[label] += 1\n \n return max(label_vote, key=result.get)", "def normalize_labels(labels):\n new_labels = np.array([-1] * len(labels))\n labels = np.array(labels)\n label_dict = dict()\n for i, label in enumerate(set(labels)):\n new_labels[np.where(labels == label)] = i\n label_dict[i] = label\n return label_dict, new_labels", "def get_binarized_and_belief(array, threshold=0.5):\r\n \r\n # check assumption above\r\n if (np.amax(array) > 1.0) or (np.amin(array) < 0.0):\r\n raise ValueError('Voxel value fed to lambda in converting to original labels was out of range.')\r\n \r\n # obtain binarized output\r\n binarized = binarize(array=array, threshold=threshold)\r\n \r\n # we will sort from least to greatest, so least suspicion is what we will believe\r\n raw_suspicion = np.absolute(array - binarized)\r\n \r\n belief = np.argsort(raw_suspicion, axis=-1)\r\n \r\n return binarized, belief", "def array2(self):\n print \"array2\"\n msgbox(whoami())\n #research\n inputLabelID = self.__needleLabelSelector.currentNode().GetID()\n labelnode=slicer.mrmlScene.GetNodeByID(inputLabelID)\n i = labelnode.GetImageData()\n shape = list(i.GetDimensions())\n shape.reverse()\n a = vtk.util.numpy_support.vtk_to_numpy(i.GetPointData().GetScalars()).reshape(shape)\n labels=[]\n val=[[0,0,0] for i in range(a.max()+1)]\n for i in xrange(2,a.max()+1):\n w =numpy.transpose(numpy.where(a==i))\n # labels.append(w.mean(axis=0))\n val[i]=[0,0,0]\n val[i][0]=w[int(round(w.shape[0]/2))][2]\n val[i][1]=w[int(round(w.shape[0]/2))][1]\n val[i][2]=w[int(round(w.shape[0]/2))][0]\n if val[i] not in self.previousValues:\n labels.append(val[i])\n self.previousValues.append(val[i])\n return labels" ]
[ "0.6541998", "0.6310633", "0.5895067", "0.566295", "0.5637139", "0.5578173", "0.55679846", "0.55309737", "0.54985076", "0.5497383", "0.54970396", "0.5491054", "0.5483852", "0.5483852", "0.54637516", "0.54519457", "0.5446611", "0.5395349", "0.53934413", "0.53921664", "0.5384632", "0.5378295", "0.53603697", "0.53526324", "0.5343171", "0.5340845", "0.53375775", "0.5334531", "0.5333955", "0.53243893" ]
0.78097486
0
Takes a list of votes and predicts based on a threshold; returns True iff the fraction of true votes is >= f
def thresh_vote(lst, f):
    if len(lst) == 0:
        # guess 0 by default (appropriate for our dataset)
        q = 0
    else:
        q = float(sum(lst)) / len(lst)
    return q >= f
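A minimal usage sketch of thresh_vote above; the vote list and threshold are made-up values:

votes = [True, False, True, True]
print(thresh_vote(votes, 0.5))  # 3 of 4 votes are true, so this prints True
print(thresh_vote([], 0.5))     # empty input falls back to q = 0, so this prints False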
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def binary_predict(probs, threshold = 0.5):\n return (probs >= threshold) * np.ones(len(probs))", "def sensitivity(\n targets: List[int], preds: List[float], threshold: float = 0.5\n) -> float:\n return recall(targets, preds, threshold)", "def get_predict(prediction, threshold):\n\n prediction[prediction < threshold] = 0\n prediction[prediction >= threshold] = 1\n \n return prediction", "def predictRating(toPredict, candidateList):\n\n ratingRelevantCandidates = []\n\n #Remove candidates with no rating specified\n for candidate in candidateList:\n currentCandidate = candidate[1]\n\n if float(currentCandidate['vote_avg']) > 0:\n ratingRelevantCandidates.append((float(currentCandidate['vote_avg']), candidate))\n\n #print(\"ratings::::::::\",currentCandidate['vote_avg'])\n\n #Remove outlier candidates based on rating\n ratingMean = np.mean([x[0] for x in ratingRelevantCandidates])\n print(\"ratingMean\", ratingMean)\n ratingSD = np.std([x[0] for x in ratingRelevantCandidates])\n print(\"ratingSD\", ratingSD)\n\n finalRatings = [x for x in ratingRelevantCandidates if (float(x[0]) < ratingMean + ratingSD)]#1.5 *\n finalRatings = [x for x in finalRatings if (float(x[0]) > ratingMean - ratingSD)]#.75 *\n\n finalRatingCandidatesWithWeight = []\n\n #Weight each candidate based on vote count, direct and actor popularity and matching score from part 1\n for candidate in finalRatings:\n directorPoints = compareDirectorPoints(toPredict['director'], candidate[1][1]['director'])\n actorPoints = compareActorPoints(toPredict['cast'], candidate[1][1]['cast'])\n voteCountPoints = int(candidate[1][1]['vote_count'])\n matchPoints = candidate[1][0] / np.max([float(x[1][0]) for x in finalRatings]) * 100\n candidateWeight = PREDICTION_MATCHPOINTS_WEIGHT * matchPoints \\\n + PREDICTION_ACTOR_WEIGHT * actorPoints \\\n + PREDICTION_DIRECTOR_WEIGHT * directorPoints \\\n + PREDICTION_VOTECOUNT_WEIGHT * voteCountPoints\n\n finalRatingCandidatesWithWeight.append((candidateWeight, candidate[0]))\n\n #Calculate the prediction\n sumRatingCandidateWeights = np.sum([float(x[0]) for x in finalRatingCandidatesWithWeight])\n sumRatingTimesCandidateWeight = np.sum([float(x[0]) * float(x[1]) for x in finalRatingCandidatesWithWeight])\n\n ratingPrediction = float(sumRatingTimesCandidateWeight / sumRatingCandidateWeights)\n\n return ratingPrediction", "def adjusted_classes(pred_prob, threshold):\n return [1 if y >= threshold else 0 for y in pred_prob]", "def predict_with_threshold(y_pred_proba, threshold):\n\n y_pred = [1 if x >= threshold else 0 for x in y_pred_proba]\n return pd.Series(data=y_pred, name='y_pred')", "def thresholding(pred,label,thres):\n \n conf =[]\n \n for i in thres:\n \n pr_th,lab_th = (pred>i),(label>i)\n conf += confusion(pr_th,lab_th)\n \n return np.array(conf).reshape(-1,4)", "def evaluate(labels, predictions):\n positive_count = 0\n positive = 0\n negative_count = 0\n negative = 0\n for i in range(len(labels)):\n if labels[i] == 1:\n positive_count+=1\n if predictions[i] == 1:\n positive +=1\n else:\n negative_count+=1\n if predictions[i] == 0:\n negative +=1\n\n sensitivity = positive / positive_count\n specificity = negative / negative_count\n\n return (sensitivity, specificity)", "def evaluate(labels, predictions):\n pos = 0\n neg = 0\n true_pos_rate = 0\n true_neg_rate = 0\n\n for i in range(len(labels)):\n if labels[i] == 1:\n pos += 1\n else:\n neg += 1\n if predictions[i] == labels[i]:\n if predictions[i] == 1:\n true_pos_rate += 1\n else:\n true_neg_rate += 1\n \n sensitivity = true_pos_rate / 
pos\n specificity = true_neg_rate / neg\n\n return (sensitivity, specificity)", "def accuracy(targets: List[int], preds: Union[List[float], List[List[float]]], \n threshold: float = 0.5) -> float:\n if type(preds[0]) == list: # multiclass\n hard_preds = [p.index(max(p)) for p in preds]\n else:\n hard_preds = [1 if p > threshold else 0 for p in preds] # binary prediction\n\n return accuracy_score(targets, hard_preds)", "def accuracy(targets: List[int],\n preds: Union[List[float], List[List[float]]],\n threshold: float = 0.5) -> float:\n if type(preds[0]) == list: # multiclass\n hard_preds = [p.index(max(p)) for p in preds]\n else:\n hard_preds = [1 if p > threshold else 0 for p in preds] # binary prediction\n\n return accuracy_score(targets, hard_preds)", "def decide(el, il, model, threshold):\n\n if model == 0:\n return el >= threshold[0] and il >=threshold[1]\n elif model == 1:\n return el >= threshold[0] or il >= threshold[1]\n elif model == 2:\n return harmonic_mean([el, il]) >= harmonic_mean(threshold)\n else:\n return bool(round(random.random()))", "def _performance(Classifier, features, labels, threshold):\n correct = 0\n for index, vector in enumerate(features):\n result = _minimal_predict(Classifier, vector, threshold)\n if result == \"Positive\" and labels[index] == 1.0 or result == \"Negative\" and \\\n labels[index] == 0.0 or result == \"Neutral\":\n correct += 1\n Classifier.performance = correct / len(labels) * 100\n return Classifier.performance", "def specificity(\n targets: List[int], preds: List[float], threshold: float = 0.5\n) -> float:\n hard_preds = [1 if p > threshold else 0 for p in preds]\n tn, fp, _, _ = confusion_matrix(targets, hard_preds).ravel()\n return tn / float(tn + fp)", "def evaluate(self, threshold=0.5):\n pass", "def predict(self, X):\n z = self.transform(X)\n pred = z < self.threshold\n return pred", "def ensembleVote(x, classes, ensemble):\n votes = np.array([0 for kk in range(len(classes))])\n for i in ensemble:\n votes = votes + classProbs(x, ensemble[i][\"tree\"], classes)\n maxVote = 0\n loc = None\n for ind, vote in enumerate(votes):\n if vote > maxVote:\n maxVote = vote\n loc = ind\n prediction = classes[loc]\n return prediction", "def evaluate(labels, predictions):\n correct_positive = 0\n correct_negative = 0\n total_positive = 0\n total_negative = 0\n\n for i in range(len(labels)):\n if labels[i] == 1:\n total_positive += 1\n if predictions[i] == 1:\n correct_positive += 1\n else:\n total_negative += 1\n if predictions[i] == 0:\n correct_negative += 1\n\n sensitivity = correct_positive / total_positive\n specificity = correct_negative / total_negative\n\n return sensitivity, specificity", "def get_thresholdtable_from_fpr(scores,labels, fpr_list):\n threshold_list = []\n live_scores = []\n for score, label in zip(scores,labels):\n if label == 0:\n live_scores.append(float(score))\n live_scores.sort(reverse=True)\n live_nums = len(live_scores)\n for fpr in fpr_list:\n i_sample = int(fpr * live_nums)\n i_sample = max(1, i_sample)\n threshold_list.append(live_scores[i_sample - 1])\n return threshold_list", "def classifier(x):\n return x[0] - x[1] + 4 < 0", "def round_using_t(prediction, threshold):\n return (prediction >= threshold).astype('int')", "def preds_proba_to_preds_class(preds_proba,threshold):\n return [True if pred > threshold else False for pred in preds_proba]", "def evaluate(labels, predictions):\n\n truePositiveCounter = 0\n trueNegativeCounter = 0\n truePositiveCorrect = 0\n trueNegativeCorrect = 0\n \n sensitivity = 0\n 
specificity = 0\n\n for i in range(len(labels)):\n if labels[i] == 1:\n truePositiveCounter += 1\n if(labels[i] == predictions[i]):\n truePositiveCorrect += 1\n elif labels[i] == 0:\n trueNegativeCounter += 1\n if(labels[i] == predictions[i]):\n trueNegativeCorrect += 1\n\n sensitivity = truePositiveCorrect / truePositiveCounter\n specificity = trueNegativeCorrect / trueNegativeCounter\n\n return sensitivity, specificity", "def partial_match_score(\n truth: List[Rationale], pred: List[Rationale], thresholds: List[float]\n) -> List[PartialMatchScore]:\n\n ann_to_rat = _keyed_rationale_from_list(truth)\n pred_to_rat = _keyed_rationale_from_list(pred)\n\n num_classifications = {k: len(v) for k, v in pred_to_rat.items()}\n num_truth = {k: len(v) for k, v in ann_to_rat.items()}\n ious: Dict[str, Dict[str, float]] = defaultdict(dict)\n for k in set(ann_to_rat.keys()) | set(pred_to_rat.keys()):\n for p in pred_to_rat.get(k, []):\n best_iou = 0.0\n for t in ann_to_rat.get(k, []):\n num = len(\n set(range(p.start_token, p.end_token))\n & set(range(t.start_token, t.end_token))\n )\n denom = len(\n set(range(p.start_token, p.end_token))\n | set(range(t.start_token, t.end_token))\n )\n iou = 0 if denom == 0 else num / denom\n if iou > best_iou:\n best_iou = iou\n ious[k][p] = best_iou\n\n scores: List[PartialMatchScore] = []\n for threshold in thresholds:\n threshold_tps: Dict[str, float] = {}\n for k, vs in ious.items():\n threshold_tps[k] = sum(int(x >= threshold) for x in vs.values())\n micro_r = (\n sum(threshold_tps.values()) / sum(num_truth.values())\n if sum(num_truth.values()) > 0\n else 0\n )\n micro_p = (\n sum(threshold_tps.values()) / sum(num_classifications.values())\n if sum(num_classifications.values()) > 0\n else 0\n )\n micro_f1 = _f1(micro_r, micro_p)\n macro_rs = list(\n threshold_tps.get(k, 0.0) / n if n > 0 else 0 for k, n in num_truth.items()\n )\n macro_ps = list(\n threshold_tps.get(k, 0.0) / n if n > 0 else 0\n for k, n in num_classifications.items()\n )\n macro_r = sum(macro_rs) / len(macro_rs) if len(macro_rs) > 0 else 0\n macro_p = sum(macro_ps) / len(macro_ps) if len(macro_ps) > 0 else 0\n macro_f1 = _f1(macro_r, macro_p)\n\n scores.append(\n PartialMatchScore(\n threshold=threshold,\n micro=InstanceScore(p=micro_p, r=micro_r, f1=micro_f1),\n macro=InstanceScore(p=macro_p, r=macro_r, f1=macro_f1),\n )\n )\n\n return scores", "def evaluateObjective(posts, threshold):\n partialSum = 0\n for post in posts:\n partialSum += max(np.sign(post[\"similarity\"] - threshold) * post[\"score\"], 0)\n return partialSum", "def tpr(positive, negative, fpr):\n threshold = np.percentile(np.asarray(negative), 100 - fpr)\n total_true_positives = sum(positive > threshold)\n\n return total_true_positives / len(positive)", "def get_tpr_from_threshold(scores,labels, threshold_list):\n tpr_list = []\n hack_scores = []\n for score, label in zip(scores,labels):\n if label == 1:\n hack_scores.append(float(score))\n hack_scores.sort(reverse=True)\n hack_nums = len(hack_scores)\n for threshold in threshold_list:\n hack_index = 0\n while hack_index < hack_nums:\n if hack_scores[hack_index] <= threshold:\n break\n else:\n hack_index += 1\n if hack_nums != 0:\n tpr = hack_index * 1.0 / hack_nums\n else:\n tpr = 0\n tpr_list.append(tpr)\n return tpr_list", "def predict(self, X, threshold=0.5):\n\n return [int(self._predict(Xi) >= threshold) for Xi in X]", "def pred_from_prob(a,threshold):\n bin_preds = np.zeros((np.size(a,0),))\n bin_preds[np.where(a[:,1]>threshold)]=1.0\n return bin_preds", "def 
get_optimal_threshhold(true_label, prediction, iterations=100, size=17):\n best_threshhold = [0.2]*size\n for t in range(size):\n best_fbeta = 0\n temp_threshhold = [0.2]*size\n for i in range(iterations):\n temp_value = i / float(iterations)\n temp_threshhold[t] = temp_value\n temp_fbeta = fbeta(true_label, prediction > temp_threshhold)\n if temp_fbeta > best_fbeta:\n best_fbeta = temp_fbeta\n best_threshhold[t] = temp_value\n return best_threshhold" ]
[ "0.6548085", "0.6381974", "0.6354342", "0.6264731", "0.6184546", "0.6126238", "0.61081564", "0.60839015", "0.6067138", "0.60335726", "0.5974422", "0.59717226", "0.5970143", "0.59656197", "0.5960877", "0.5954389", "0.59533507", "0.5947933", "0.5928161", "0.5925397", "0.59252214", "0.5909234", "0.59064037", "0.5898637", "0.58979887", "0.5890211", "0.5889917", "0.5866966", "0.58560133", "0.58470017" ]
0.7472907
0
Takes dictionaries of predicted and ground truth and returns confusion matrix
def confusion_matrix(predicted, gt):
    tp = [k for k in predicted if predicted[k] and gt[k]]
    tn = [k for k in predicted if not predicted[k] and not gt[k]]
    fp = [k for k in predicted if predicted[k] and not gt[k]]
    fn = [k for k in predicted if not predicted[k] and gt[k]]
    return tp, tn, fp, fn
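A small worked example for confusion_matrix above; the keys and boolean labels are invented for illustration:

predicted = {"a": True, "b": False, "c": True, "d": False}
gt = {"a": True, "b": False, "c": False, "d": True}
tp, tn, fp, fn = confusion_matrix(predicted, gt)
# tp == ["a"], tn == ["b"], fp == ["c"], fn == ["d"]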
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_confusion_matrix_intersection_mats(groundtruth, predicted):\n\n confusion_matrix_arrs = {}\n\n groundtruth_inverse = np.logical_not(groundtruth)\n predicted_inverse = np.logical_not(predicted)\n\n confusion_matrix_arrs['tp'] = np.logical_and(groundtruth, predicted)\n confusion_matrix_arrs['tn'] = np.logical_and(groundtruth, predicted_inverse)\n confusion_matrix_arrs['fp'] = np.logical_and(groundtruth_inverse, predicted)\n confusion_matrix_arrs['fn'] = np.logical_and(groundtruth, predicted_inverse)\n\n return confusion_matrix_arrs", "def get_confusion_matrix(y_true, y_pred):\r\n\r\n ## 3 classes\r\n TP1, TP2, TP3, FP1, FP2, FP3, TN1, TN2, TN3, FN1, FN2, FN3 = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\r\n\r\n for i in range(y_true.shape[0]):\r\n if y_true[i] == 0 and y_pred[i] == 0:\r\n TN1 += 1\r\n elif y_true[i] == 0 and y_pred[i] != 0:\r\n FP1 += 1\r\n elif y_true[i] != 0 and y_pred[i] == 0:\r\n FN1 += 1\r\n elif y_true[i] != 0 and y_pred[i] != 0:\r\n TP1 += 1\r\n\r\n for i in range(y_true.shape[0]):\r\n if y_true[i] == 1 and y_pred[i] == 1:\r\n TN2 += 1\r\n elif y_true[i] == 1 and y_pred[i] != 1:\r\n FP2 += 1\r\n elif y_true[i] != 1 and y_pred[i] == 1:\r\n FN2 += 1\r\n elif y_true[i] != 1 and y_pred[i] != 1:\r\n TP2 += 1\r\n\r\n for i in range(y_true.shape[0]):\r\n if y_true[i] == 2 and y_pred[i] == 2:\r\n TN3 += 1\r\n elif y_true[i] == 2 and y_pred[i] != 2:\r\n FP3 += 1\r\n elif y_true[i] != 2 and y_pred[i] == 2:\r\n FN3 += 1\r\n elif y_true[i] != 2 and y_pred[i] != 2:\r\n TP3 += 1\r\n\r\n conf_matrix1 = [\r\n [TP1, FP1],\r\n [FN1, TN1]\r\n ]\r\n conf_matrix2 = [\r\n [TP2, FP2],\r\n [FN2, TN2]\r\n ]\r\n conf_matrix3 = [\r\n [TP3, FP3],\r\n [FN3, TN3]\r\n ]\r\n\r\n return conf_matrix1, conf_matrix2, conf_matrix3", "def confusion_matrix(self,predictions,labels):\n TP = np.sum((np.round(predictions) == True) * (np.asarray(labels, dtype=bool) == True))\n FP = np.sum((np.round(predictions) == True) * (np.asarray(labels, dtype=bool) == False))\n FN = np.sum((np.round(predictions) == False) * (np.asarray(labels, dtype=bool) == True))\n TN = np.sum((np.round(predictions) == False) * (np.asarray(labels, dtype=bool) == False))\n\n return np.array([[TP,FP],[FN,TN]])", "def confusionMatrix(actual, predict, truePositiveClass=''):\n classes = list(set(actual + predict))\n if len(truePositiveClass) > 0:\n id0 = classes.index(truePositiveClass)\n classes[id0] = classes[0]\n classes[0] = truePositiveClass\n cMatrix = np.zeros( (len(classes), len(classes)) )\n\n for i in range(0,len(predict)):\n ida = classes.index(actual[i])\n idp = classes.index(predict[i])\n cMatrix[ida][idp] += 1\n return cMatrix", "def confusion_matrix(classifier_output, true_labels):\n\n # TODO: finish this.\n true_pos = 0.0\n true_neg = 0.0\n false_neg = 0.0\n false_pos = 0.0\n for elem1,elem2 in zip(classifier_output, true_labels):\n if(elem1==elem2) and (elem1==1):\n true_pos += 1\n elif(elem1==elem2) and (elem2!=1):\n true_neg += 1\n elif(elem1 != 1):\n false_neg +=1\n else:\n false_pos +=1\n conf_matrix = np.array([[true_pos, false_neg],[false_pos, true_neg]])\n return conf_matrix", "def get_confmatrix(self,y_pred,y_test):", "def confusion_matrix(actual: list, predicted: list) -> list:\n return confusion_matrix(actual, predicted)", "def Confusion_Matrix(predicted_labels: list, actual_labels: list):\n labels = set(actual_labels)\n\n predicted_labels = list(map(custom_round, predicted_labels))\n\n matrix = pd.DataFrame(index=labels, columns=labels)\n\n matrix = matrix.fillna(0)\n\n for i in range(len(actual_labels)):\n 
matrix[actual_labels[i]][predicted_labels[i]] += 1\n m = matrix.values\n\n plt.matshow(m, cmap=plt.cm.Blues)\n\n for i in range(2):\n for j in range(2):\n c = m[j, i]\n plt.text(i, j, str(c), va='center', ha='center')\n\n plt.show()", "def custom_confusion_matrix(predictions, targets):\n tp, fp, fn, tn = [], [], [], []\n\n for pred, targ in zip(predictions, targets):\n for shift_pred, shift_targ in zip(pred, targ):\n if shift_pred == 1 and shift_targ == 1: # True positive\n tp.append(1)\n elif shift_pred == 1 and shift_targ == 0: # False positive\n fp.append(1)\n elif shift_pred == 0 and shift_targ == 1: # False negative\n fn.append(1)\n elif shift_pred == 0 and shift_targ == 0: # True negative:\n tn.append(1)\n\n tp_count = len(tp)\n fp_count = len(fp)\n fn_count = len(fn)\n tn_count = len(tn)\n\n conf_matrix = np.array([\n [tp_count, fp_count],\n [fn_count, tn_count]\n ])\n\n return conf_matrix", "def confusion_matrix(links_true, links_pred, total=None):\n\n links_true = _get_multiindex(links_true)\n links_pred = _get_multiindex(links_pred)\n\n tp = true_positives(links_true, links_pred)\n fp = false_positives(links_true, links_pred)\n fn = false_negatives(links_true, links_pred)\n\n if total is None:\n tn = numpy.nan\n else:\n if isinstance(total, pandas.MultiIndex):\n total = len(total)\n tn = true_negatives(links_true, links_pred, total)\n\n return numpy.array([[tp, fn], [fp, tn]])", "def confusion_matrix(expected, predicted):\n\n retval = numpy.zeros((10,10), dtype=float)\n\n for k in range(10):\n pred_k = predicted[expected==k] # predictions that are supposed to be 'k'\n retval[:,k] = numpy.array([len(pred_k[pred_k==p]) for p in range(10)])\n retval[:,k] /= len(pred_k)\n\n return retval", "def confusion_matrix(y_true, y_pred, labels):\n\n #Define variables\n matrix = []\n #Creates matrix dimensions\n for i in range(len(labels)):\n matrix.append([])\n for j in range(len(labels)):\n matrix[i].append(0)\n\n for i in range(len(y_true)):\n trueIndex = -1\n predIndex = -1\n #Get indexes of true and predicted values\n for j, label in enumerate(labels):\n if(label == y_true[i]):\n trueIndex = j\n if(label == y_pred[i]):\n predIndex = j\n matrix[trueIndex][predIndex] = matrix[trueIndex][predIndex] + 1\n\n return matrix", "def getConfusionMatrix(pred, real):\n # print pd.crosstab(pred, real) \n \n total = float(real.shape[0])\n \n tp = 0 # true positive\n tn = 0 # true negitive\n fp = 0 # false positive\n fn = 0 # false negitive\n for predicted, actual in zip(pred, real):\n if predicted == actual:\n if predicted == 1:\n tp += 1\n else:\n tn += 1\n else:\n if predicted == 1:\n fp += 1\n else:\n fn += 1\n \n\n print \"(tp, tn, fp, fn):\" , tp, tn, fp, fn\n print \"accuracy is :\", (tp+tn)/total", "def Evaluate_Prediction(prediction_mask, true_mask, feature_dict, \n test_name = 'Test'):\n \n # true_mask has 3 layers but they are redundant\n true_mask = true_mask[:,:,0]\n \n # Convert from Prob to 0,1,2...\n prediction_mask = prediction_mask.argmax(axis = 2) + 1 \n\n # Compute confusion matrix -- subtract 1 so that first label is \"0\" \n conf = custom_confusion_matrix(prediction_mask.flatten(), true_mask.flatten(), feature_dict)\n \n # Convert mask to proper shape for loss function - shape should have 4 dimensions with one-hot encoding\n true_mask = Expand_Mask(mask = true_mask, num_class = len(feature_dict)) ## to 0,1\n true_mask = np.expand_dims(true_mask, axis=0)\n true_mask = true_mask.astype(np.float)\n\n # Convert prediction into proper shape for loss function\n prediction_mask = 
Expand_Mask(mask = prediction_mask, num_class = len(feature_dict)) #to 0,1\n prediction_mask = np.expand_dims(prediction_mask, axis=0) \n prediction_mask = prediction_mask.astype(np.float)\n \n score = {'Test':test_name, \n 'Dice':Dice_Coef_Multilabel(true_mask, prediction_mask).numpy(), \n 'Accuracy':np.mean(tf.metrics.categorical_accuracy(true_mask, prediction_mask)), \n 'CE':np.mean(tf.metrics.categorical_crossentropy(true_mask, prediction_mask))}\n \n return [score, conf]", "def custom_confusion_matrix(prediction_vector, true_vector, feature_dict ):\n \n values = list(feature_dict.keys())\n values.sort()\n nvals = len(values)\n confusion_matrix = np.zeros((nvals, nvals))\n for i in range(len(values)):\n for j in range(len(values)):\n mask = (true_vector==values[i]) & (prediction_vector==values[j]) \n confusion_matrix[i,j] = mask.sum()\n \n return confusion_matrix", "def confusion_matrix(\n true_labels,\n predicted_labels\n ) -> np.array:\n n_samples_true, n_samples_predicted = len(true_labels), len(predicted_labels)\n if n_samples_true != n_samples_predicted:\n raise ValueError()\n n_classes = len(set(true_labels))\n matrix = np.zeros((n_classes,n_classes))\n for i in range(len(true_labels)):\n true_label = true_labels[i]\n predicted_label = predicted_labels[i]\n matrix[predicted_label][true_label] += 1\n return matrix", "def confusion_matrix(predict, labels, num_classes):\n # Compute the count of correct and error samples in each snr.\n conf = np.zeros([num_classes, num_classes])\n for i in range(0, len(labels)):\n j = labels[i]\n k = np.argmax(predict[i])\n conf[j, k] = conf[j, k] + 1\n\n # Compute the count of correct and error ratio in each snr.\n # =====confusion matrix=====.\n conf_norm = np.zeros([num_classes, num_classes])\n for i in range(0, num_classes):\n conf_norm[i, :] = conf[i, :] / np.sum(conf[i, :])\n\n return conf_norm", "def confusion_matrix(self, y_true=None, y_pred=None, labels=None, normalize=None, **kwargs):\n y_true, y_pred, binary, representor, decimal = self.get_processed_data(y_true, y_pred, decimal=None)\n matrix, imap, imap_count = cu.calculate_confusion_matrix(y_true, y_pred, labels, normalize)\n return matrix, imap, imap_count", "def confusionMatrix(testDataPredictions, testDataOriginal):\n matrix = {\"predicted >50K correctly as >50K\": 0, \"predicted >50K incorrectly as <=50K\": 0,\n \"predicted <=50K correctly as <=50K\": 0, \"predicted <=50K incorrectly as >50K\": 0}\n\n for instance in range(len(testDataPredictions)):\n prediction = testDataPredictions[instance]\n original = testDataOriginal[14].iloc[instance]\n\n #calculating total number of TP,TN,FP and FN\n\n if prediction == 1.0 and original == 1.0:\n matrix[\"predicted >50K correctly as >50K\"] += 1.00\n elif prediction == 0.0 and original == 1.0:\n matrix[\"predicted >50K incorrectly as <=50K\"] += 1.00\n elif prediction == 0.0 and original == 0.0:\n matrix[\"predicted <=50K correctly as <=50K\"] += 1.00\n elif prediction == 1.0 and original == 0.0:\n matrix[\"predicted <=50K incorrectly as >50K\"] += 1.00\n\n #Making the confusion matrix look readable on console printing\n print('----------------')\n print('CONFUSION MATRIX')\n print( 'TP: ', matrix[\"predicted >50K correctly as >50K\"], '||', 'FP: ', matrix[\"predicted >50K incorrectly as <=50K\"])\n print('----------------')\n print('FN: ', matrix[\"predicted <=50K incorrectly as >50K\"], '||', 'TN: ', matrix[\"predicted <=50K correctly as <=50K\"])\n\n # definition of sensitivity, precision and specificity formulas\n sensitivity = 
matrix[\"predicted >50K correctly as >50K\"] / (\n matrix[\"predicted >50K correctly as >50K\"] + matrix[\"predicted <=50K incorrectly as >50K\"])\n\n precision = matrix[\"predicted >50K correctly as >50K\"]/ (\n matrix[\"predicted >50K correctly as >50K\"] + matrix[\"predicted >50K incorrectly as <=50K\"])\n\n specificity = matrix[\"predicted <=50K correctly as <=50K\"] / (\n matrix[\"predicted <=50K correctly as <=50K\"] + matrix[\"predicted >50K incorrectly as <=50K\"])\n\n print('Precision: ' + str(precision*100) + '%')\n print('Sensitivity: '+ str(sensitivity*100)+ '%')\n print('Specificity: '+ str(specificity*100) +'%')\n\n return matrix, precision, sensitivity, specificity", "def make_metrics(self, predictions):\n\n pred_idx = []\n pred_classes = []\n\n target_idx = []\n target_classes = []\n target_count = len(self._dataset.class_idx2text)\n\n for data_id, pred in predictions.items():\n target = self._dataset.get_ground_truth(data_id)\n\n pred_idx.append(pred[\"class_idx\"])\n pred_classes.append(self._dataset.class_idx2text[pred[\"class_idx\"]])\n\n target_idx.append(target[\"class_idx\"])\n target_classes.append(target[\"class_text\"])\n\n metrics = {\n \"accuracy\": simple_accuracy(pred_idx, target_idx),\n }\n\n if target_count == 2:\n # binary class\n f1_metric = f1(pred_idx, target_idx)\n metrics.update(f1_metric)\n\n matthews_corr_metric = matthews_corr(pred_idx, target_idx)\n metrics.update(matthews_corr_metric)\n return metrics", "def confusion_matrix_(y_true, y_pred, labels=None):\r\n tp = 0\r\n tn = 0\r\n fp = 0\r\n fn = 0\r\n if labels == None:\r\n values = list(set(y_true))\r\n else:\r\n values = labels\r\n if (len(values)) != 2:\r\n return None\r\n for i, elem in enumerate(y_true):\r\n if y_pred[i] == values[1] and y_true[i] == y_pred[i]:\r\n tp += 1\r\n elif y_pred[i] == values[1] and y_true[i] != y_pred[i]:\r\n fp += 1\r\n elif y_pred[i] == values[0] and y_true[i] == y_pred[i]:\r\n tn += 1\r\n elif y_pred[i] == values[0] and y_true[i] != y_pred[i]:\r\n fn += 1\r\n matrix = np.array([[tp, fp], [fn, tn]])\r\n return matrix", "def confusion_matrix(y_true, y_pred, labels):\r\n matrix = []\r\n\r\n for i, yt in enumerate(labels):\r\n matrix.append([])\r\n for _, yp in enumerate(labels):\r\n matrix[i].append(0)\r\n\r\n for t, p in zip(y_true, y_pred):\r\n t_num = labels.index(t)\r\n p_num = labels.index(p)\r\n matrix[t_num][p_num] += 1\r\n\r\n return matrix", "def confusion_matrix(y_true, y_pred, table_show=True):\n\tFIRST_CLASS = 1\n\tSECOND_CLASS = 0\n\n\tzipped = np.array(list(zip(y_true, y_pred)))\n\ttp, fn, fp, tn = 0, 0, 0, 0\n\n\tfor y_true, y_pred in zipped:\n\t\tif y_true == y_pred and y_true == FIRST_CLASS:\n\t\t\ttp += 1\n\t\telif y_true == y_pred and y_true == SECOND_CLASS:\n\t\t\ttn += 1\n\t\telif y_true != y_pred and y_true == SECOND_CLASS:\n\t\t\tfp += 1\n\t\telse:\n\t\t\tfn += 1\n\n\tif table_show:\n\t\treturn np.array([tp, fn, fp, tn]).reshape([2,2])\n\n\treturn tp, fn, fp, tn", "def confusion_matrix(df):\n rows, true_counts = np.unique(df[\"label\"].values, return_counts=True)\n cols, predicted_counts = np.unique(df[\"label\"].values, return_counts=True)\n\n matrix = np.ndarray(shape=(len(rows), len(cols)), dtype=float)\n for ri, row in enumerate(rows):\n for ci, col in enumerate(cols):\n matrix[ri][ci] = len(df[(df.label == row) & (df.classification == col)])\n\n return matrix, rows, cols", "def confusion_matrix(gt, pred) -> np.ndarray:\n \n # Number of classes inferred from gt. 
Assuming classes are enumerated 0 ..\n n_classes = gt.max() + 1\n cm = np.zeros((n_classes, n_classes), dtype=np.uint32)\n \n # Fill matrix\n for gt_class in range(n_classes):\n for pred_class in range(n_classes):\n cm[pred_class, gt_class] = ((pred == pred_class) & (gt == gt_class)).sum()\n \n return cm", "def confusion_matrix_pd(Y_true, Y_pred):\n Y_true = pd.Series([ACTIVITIES[y] for y in np.argmax(Y_true, axis=1)])\n Y_pred = pd.Series([ACTIVITIES[y] for y in np.argmax(Y_pred, axis=1)])\n return pd.crosstab(Y_true, Y_pred, rownames=['True'], colnames=['Pred'])", "def confusion_matrix(y_true, y_pred):\n skplt.plot_confusion_matrix(y_true, y_pred, normalize=True)\n plt.show()", "def _prep_confusion_matrix(self, y_test, y_pred, labels):\n\n # Calculate confusion matrix and flatten it to a simple array\n if len(y_test.shape) == 1:\n confusion_array = metrics.confusion_matrix(y_test, y_pred).ravel()\n\n # Structure into a DataFrame suitable for Qlik\n result = []\n i = 0\n for t in labels:\n for p in labels:\n result.append([str(t), str(p), confusion_array[i]])\n i = i + 1\n self.model.confusion_matrix = pd.DataFrame(result, columns=[\"true_label\", \"pred_label\", \"count\"])\n self.model.confusion_matrix.insert(0, \"model_name\", self.model.name)\n # Handle confusion matrix format for multi-label classification\n else:\n confusion_array = metrics.multilabel_confusion_matrix(y_test, y_pred)\n result = pd.DataFrame(confusion_array.reshape(-1, 4), columns=[\"true_negative\", \"false_positive\", \"false_negative\", \"true_positive\"])\n self.model.confusion_matrix = pd.DataFrame(np.arange(len(confusion_array)), columns=[\"step\"])\n self.model.confusion_matrix = pd.concat([self.model.confusion_matrix, result], axis=1)\n self.model.confusion_matrix.insert(0, \"model_name\", self.model.name)", "def compute_confuse_matrix(y_targetlabel_list_single, y_logit_array_single, label_dict, name='default'):\n #1.get target label and predict label\n # y_target_labels=get_target_label_short(y_targetlabel_list_single) #e.g. y_targetlabel_list[0]=[2,12,88]\n y_target_labels = y_targetlabel_list_single\n\n # y_predict_labels=[i for i in range(len(y_logit_array_single)) if y_logit_array_single[i]>=0.50] #TODO 0.5PW e.g.[2,12,13,10]\n # y_predict_labels= y_logit_array_single.index(min(y_logit_array_single))\n\n flag = max(y_logit_array_single)\n y_predict_labels = []\n for i in range(len(y_logit_array_single)):\n if abs(y_logit_array_single[i] - flag) < 0.1:\n y_predict_labels.append(i)\n\n a = list(set(y_target_labels))\n b = list(set(y_predict_labels))\n acc = operator.eq(a,b)\n\n #if len(y_predict_labels)<1: y_predict_labels=[np.argmax(y_logit_array_single)] #TODO ADD 2018.05.29\n if random.choice([x for x in range(random_number)]) ==1:\n print(name+\".y_target_labels:\",y_target_labels,\";y_predict_labels:\",y_predict_labels) #debug purpose\n\n #2.count number of TP,FP,FN for each class\n y_labels_unique=[]\n y_labels_unique.extend(y_target_labels)\n y_labels_unique.extend(y_predict_labels)\n y_labels_unique=list(set(y_labels_unique))\n for i,label in enumerate(y_labels_unique): #e.g. 
label=2\n TP, FP, FN = label_dict[label]\n if label in y_predict_labels and label in y_target_labels:#predict=1,truth=1 (TP)\n TP=TP+1\n elif label in y_predict_labels and label not in y_target_labels:#predict=1,truth=0(FP)\n FP=FP+1\n elif label not in y_predict_labels and label in y_target_labels:#predict=0,truth=1(FN)\n FN=FN+1\n label_dict[label] = (TP, FP, FN)\n return label_dict, acc", "def evaluate_classifications(self):\n test_labels = open('./digitdata/testlabels', 'r')\n self.init_confusion_matrix()\n i = 0\n class_stats = {0:[0,0], 1:[0,0], 2:[0,0], 3:[0,0], 4:[0,0], 5:[0,0], 6:[0,0], 7:[0,0], 8:[0,0], 9:[0,0]}\n total_correct = 0\n num_labels = 1000\n for label in test_labels:\n int_label = int(label)\n if int_label == self.solutions[i]:\n class_stats[int_label][0] += 1\n self.confusion_matrix[int_label][self.solutions[i]] += 1\n else:\n self.confusion_matrix[int_label][self.solutions[i]] += 1\n class_stats[int_label][1] += 1\n i += 1\n for k in class_stats:\n print \"Class \" + str(k) + \": \" + str(float(class_stats[k][0])/class_stats[k][1])\n total_correct += float(class_stats[k][0])\n print \"Overall Accuracy: \" + str(total_correct/num_labels) \n for l in range(0,10):\n for w in range(0,10):\n self.confusion_matrix[l][w] = float(self.confusion_matrix[l][w]) / class_stats[l][1]\n \n s = [[str(e) for e in row] for row in self.confusion_matrix]\n lens = [len(max(col, key=len)) for col in zip(*s)]\n fmt = '\\t'.join('{{:{}}}'.format(x) for x in lens)\n table = [fmt.format(*row) for row in s]\n print '\\n'.join(table)\n #self.print_confusion_matrix() " ]
[ "0.7215619", "0.71285826", "0.7120823", "0.70471257", "0.70212066", "0.6992223", "0.696946", "0.6948753", "0.6921117", "0.69178545", "0.6913209", "0.69081", "0.68527514", "0.6760738", "0.675917", "0.6750563", "0.6734669", "0.6700748", "0.6682267", "0.6672295", "0.66603005", "0.6654966", "0.6634522", "0.66302985", "0.66160774", "0.6586201", "0.6572117", "0.6544181", "0.65006495", "0.64981025" ]
0.72815716
0
Returns argmax, max of dictionary
def argmax(d):
    return max(d.iteritems(), key=operator.itemgetter(1))
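A usage sketch for argmax above. Two assumptions the snippet leaves implicit: import operator must be in scope, and d.iteritems() is Python 2 (the Python 3 equivalent is d.items()):

import operator  # required by argmax

d = {"cat": 3, "dog": 7, "bird": 5}
key, value = argmax(d)  # ("dog", 7) on Python 2; on Python 3, change iteritems() to items() inside argmax first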
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def argMax(self):\n if len(self.keys()) == 0: return None\n all = list(self.items())\n values = [x[1] for x in all]\n maxIndex = values.index(max(values))\n return all[maxIndex][0]", "def argMax(self):\n if len(list(self.keys())) == 0:\n return None\n all = list(self.items())\n values = [x[1] for x in all]\n maxIndex = values.index(max(values))\n return all[maxIndex][0]", "def argmax(table):\n return max((v,k) for k,v in table.iteritems())[1]", "def get_max_key(dico):\n our_max = 0\n argmax = None\n for key, val in dico.items():\n if val > our_max:\n argmax = key\n our_max = val\n return argmax", "def keywithmaxval(d):\n\treturn max(d, key=lambda k: d[k])", "def keywithmaxval(d): \n v=list(d.values())\n k=list(d.keys())\n return k[v.index(max(v))]", "def keywithmaxval(d): \n v=list(d.values())\n k=list(d.keys())\n return k[v.index(max(v))]", "def keywithmaxval(d): \n v=list(d.values())\n k=list(d.keys())\n return k[v.index(max(v))]", "def max_key (dict):\n output = -1\n for key, value in dict.items():\n output = max(output, key)\n return output", "def keywithmaxval(dictionary): # from https://stackoverflow.com/questions/268272/getting-key-with-maximum-value-in-dictionary/12343826#12343826 \n\tv=list(dictionary.values())\n\tk=list(dictionary.keys())\n\treturn k[v.index(max(v))]", "def keywithmaxval(kwmv_dict):\n values = list(kwmv_dict.values())\n keys = list(kwmv_dict.keys())\n return keys[values.index(max(values))]", "def keywithmaxval(d): \r\n v=list(d.values())\r\n k=list(d.keys())\r\n return k[v.index(max(v))]", "def find_max_key_val_in_dict(in_dict):\n\tmax_key = None\n\tmax_val = -np.inf\n\tfor key,val in in_dict.iteritems():\n\t\tif val >= max_val:\n\t\t\tmax_val = val\n\t\t\tmax_key = key\n\treturn (max_key,max_val)", "def argmax(self, values: pdarray) -> Tuple[groupable, pdarray]:\n k, v = self.aggregate(values, \"argmax\")\n return k, cast(pdarray, v)", "def argmax(fn,over):\n return max([(arg,fn(arg)) for arg in over],key=lambda v: v[1])[0]", "def max(*args, **kwargs):\n key = kwargs.get(\"key\", lambda x: x)\n args = args[0] if len(args) == 1 else args[:]\n max_value = \"\"\n for arg in args:\n if max_value == \"\":\n max_value = arg\n max_value = arg if key(arg) > key(max_value) else max_value\n return max_value", "def keymaxval (dictionary):\n values = list (dictionary.values())\n return list(dictionary.keys())[values.index(max(values))]", "def __argmax(l: list, key):\n max = float('-inf')\n max_i = -1\n for i in range(len(l)):\n if key(l[i]) > max:\n max = key(l[i])\n max_i = i\n return max_i", "def most_occured(dict):\n\n max = dict['e']\n max_alpha = 'e'\n\n for i, j in zip(dict.values(), dict.keys()):\n\n if max < i:\n max = i\n max_alpha = j\n \n return max_alpha", "def findMax(img):\n\td = minMaxLoc(img)\n\treturn {\"maxVal\":d[\"maxVal\"], \"maxLoc\":d[\"maxLoc\"]}", "def extract_max_value(h: np.ndarray):\n return np.argmax(h, axis=1)", "def data_dict_max(data_dict, feature):\n name = max(filter(lambda k: isinstance(data_dict[k][feature],\n (int, float)), data_dict), key=lambda k: data_dict[k][feature])\n\n return name, data_dict[name][feature]", "def argmax(self, evidence={}):\n if len(evidence)==0:\n return self.v.ind2sub(self.t.argmax())\n ax = tuple([ evidence[v] if v in evidence else slice(None) for v in self.v ])\n return self.v.ind2sub( self.t[ax].argmax() )", "def key_of_max(d):\n keys = list(d.keys())\n keys.sort()\n return max(keys, key=lambda x: d[x])", "def max_map(freq_map):\n\n max_val = max(freq_map.values())\n return max_val", "def argmax(self, 
values):\n return self.aggregate(values, \"argmax\")", "def dict_max(dic):\n cnt = 0\n for i in dic:\n if dic[i] > cnt:\n cnt = dic[i]\n return cnt", "def test_perf_max():\n dict_time = timeit.timeit(\n \"max(keys_dict.keys())\",\n setup=\"keys_dict = {key: key for key in range(1000, -1000, -1)}\",\n number=1000\n )\n dict_sort_time = timeit.timeit(\n \"sorted(keys_dict.keys())[-1]\",\n setup=\"keys_dict = {key: key for key in range(1000, -1000, -1)}\",\n number=1000\n )\n tree_time = timeit.timeit(\n \"keys_tree.max()\",\n setup=\"from amp_trees import OrderedTreeDict;\"\n \"keys_tree = OrderedTreeDict((key, key) for key in range(1000, -1000, -1))\",\n number=1000\n )\n assert dict_time > tree_time, \"Max method is slow.\"\n assert dict_sort_time > tree_time, \"Max method is slow.\"", "def argmax(x):\n def op(a, b):\n comp = (a[1] > b[1])\n return comp.if_else(a[0], b[0]), comp.if_else(a[1], b[1])\n return tree_reduce(op, enumerate(x))[0]", "def argmax(x):\n def op(a, b):\n comp = (a[1] > b[1])\n return comp.if_else(a[0], b[0]), comp.if_else(a[1], b[1])\n return tree_reduce(op, enumerate(x))[0]" ]
[ "0.7893945", "0.7893012", "0.78599966", "0.7807235", "0.7397014", "0.7194823", "0.7194823", "0.7194823", "0.7170892", "0.7158785", "0.7147766", "0.71402246", "0.71295154", "0.71241313", "0.7121471", "0.7005879", "0.6961847", "0.6961493", "0.69519615", "0.68631655", "0.6839727", "0.6824366", "0.68026197", "0.67939425", "0.673112", "0.6730373", "0.6664163", "0.66009325", "0.657136", "0.657136" ]
0.83839536
0
Produce nboot bootstrap samples from applying func to data
def bootstrap(data, func, nboot):
    n = len(data)
    resamples = np.array([[random.choice(data) for i in range(n)] for j in range(nboot)])
    return np.apply_along_axis(func, 1, resamples)
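A usage sketch for bootstrap above, assuming numpy is available as np and the standard-library random module is imported, since the function body refers to both at module level; the data values are made up:

import random
import numpy as np

data = [2.0, 3.5, 1.8, 4.2, 2.9]
boot_means = bootstrap(data, np.mean, nboot=1000)
# boot_means is a length-1000 array, one mean per bootstrap resample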
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bootstrap(data):\r\n size = int(len(data))\r\n train = resample(data, n_samples=size, replace=True)\r\n test = data.drop(train.index) \r\n return train[encoded_features], train[target], test[encoded_features], test[target]", "def bootstrap_replicate_1d(data, func):\r\n bs_sample = np.random.choice(data, len(data))\r\n return func(bs_sample)", "def bootstrap_replicate_1d(data, func):\n bs_sample = np.random.choice(data, len(data))\n\n return func(bs_sample)", "def bootstrap_replicate_1d(data, func):\n bs_sample = np.random.choice(data, len(data))\n return func(bs_sample)", "def bootstrap_statistic(data, stats_fn, num_samples):\n return [stats_fn(bootstrap_sample(data)) for _ in range(num_samples)]", "def bootstrap_sample(data):\n return [random.choice(data) for _ in data]", "def bootstrap_data(self):\n for i in range(self.bootstraps):\n df_i = self.training_df.groupby(\n self.random_effect, group_keys=False\n ).apply(\n lambda x: x.sample(len(x), replace=True)\n )\n self.models.append(self.convert(df=df_i))", "def standard_bootstrap(dataset):\n randseed=np.random.randint(0,10000)\n np.random.seed(randseed)\n \n n = dataset.shape[0]\n b = np.random.randint(0, high=n-1, size=n)\n return dataset[b]", "def dataset_augmentation(data_start, bootstrapping = 1, epurate = 1, shuffle = True):\n data = data_start\n for ii in range(bootstrapping):\n data = data.append(data_start.apply(bootstrap_sample, axis=1), ignore_index=True)\n\n#Bugged version that weirdly works well....\n# for ii in range(bootstrapping):\n # data = data.append(bootstrap_sample(data_start), ignore_index=True)\n\n for ii in range(epurate):\n data = data.append(data_start.apply(epurate_sample, axis=1), ignore_index=True)\n\n # Shuffling (Important)\n if shuffle == True:\n data = data.sample(frac=1)\n return data", "def bootstrap(X):\n return X[np.random.choice(list(range(X.shape[0])), size=X.shape[0]), :]", "def bootstrap(x, iter=int(1E6), return_samples=False):\n\n \n means = np.empty(iter) \n dfs = []\n for i in tqdm(range(iter), desc='Performing bootstrap sampling'):\n resamp = np.random.choice(x, size=len(x), replace=True)\n means[i] = resamp.mean()\n\n if return_samples:\n _df = pd.DataFrame([])\n _df['value'] = resamp\n _df['iter'] = i + 1\n dfs.append(_df)\n\n # Compute confidence intervals of the means.\n mean_val = means.mean()\n bounds_ci = {'99%': (0.5, 99.5), '95%': (2.5, 97.5), '90%': (5, 95),\n '75%': (12.5, 87.5), '50%': (25, 75), '25%': (37.5, 62.5),\n '10%': (45, 55), '5%': (47.5, 52.5), '1%': (49.5, 50.5)} \n cis = {} \n for k, v in bounds_ci.items():\n bounds = np.percentile(means, v)\n cis[k] = bounds\n\n statistics['original_data'] = x\n statistics['resampled_means'] = means\n statistics['mean_value'] = mean_val\n statistics['confidence_intervals'] = cis\n\n if return_samples:\n _df = pd.concat(dfs, sort=False)\n return [statistics, _df]\n else:\n return statistics", "def generate_samples(self, n_samples):", "def generate_samples(self, n_samples):", "def bootstrap(series, func=statistics.mean, confidence=0.9):\n n = len(series)\n n_bootstrap = 250\n digests = []\n for j in range(n_bootstrap):\n bootstrap_sample = [\n random.choice(series)\n for _ in range(n)\n ]\n digest = func(bootstrap_sample)\n digests.append(digest)\n digests.sort()\n low, mid, high = (1.0-confidence)/2.0, 0.5, (1.0+confidence)/2.0\n low, mid, high = int(low*n_bootstrap), int(mid*n_bootstrap), int(high*n_bootstrap)\n return digests[low], digests[mid], digests[high]", "def bootstrap_resample(X, n=None):\r\n if n == None:\r\n n = 
len(X)\r\n \r\n resample_i = np.floor(np.random.rand(n)*len(X)).astype(int)\r\n X_resample = X[resample_i]\r\n return X_resample", "def bootstrap(data, num_samples, statistic, alpha):\n n = len(data)\n idx = npr.randint(0, n, (num_samples, n))\n samples = x[idx]\n stat = np.sort(statistic(samples, 1))\n return (stat[int((alpha/2.0)*num_samples)],\n stat[int((1-alpha/2.0)*num_samples)])", "def generate_bootstrap_samples(num_samples, test_universe, test_set_sizes):\n for sample_idx, sample_size in zip(range(num_samples), cycle(test_set_sizes)):\n yield random.sample(test_universe, sample_size)", "def genBootstrapData(fullData, dirName=\"bootstrap_data/\", ti=None, tf=None, n=1, blockLen=7):\n # If initial (final) time not given, apply block bootstrap to whole data set\n if ti == None:\n ti = 0\n if tf == None:\n tf = fullData.shape[0]\n\n # Reset seed\n np.random.seed()\n\n bsSets = []\n\n for i in range(0, n):\n bsSet = fullData.copy()\n\n # Loop over the sensors\n for sensor in range(fullData.shape[1]):\n # Loop over the blocks\n for tStart in range(ti, tf, blockLen):\n # Resample only the non-nan datapoints\n # TODO: is this a valid way of doing this???\n oldBlockNonNans = bsSet[tStart:tStart+blockLen, sensor].copy()\n oldBlockNonNans = oldBlockNonNans[np.isfinite(oldBlockNonNans)]\n\n for t in range(tStart, min(tStart + blockLen, fullData.shape[0])):\n if not np.isnan(bsSet[t, sensor]):\n bsSet[t, sensor] = np.random.choice(oldBlockNonNans, 1, replace=False)\n\n bsSets.append(bsSet)\n\n # Save the dataset\n np.savetxt(dirName + \"/blockLen=%i_%i.csv\"%(blockLen, i), bsSet, delimiter=\" \", fmt=\"%f\")\n\n return bsSets", "def bs_replicate(data, func=np.mean):\n bs_sample = np.random.choice(data, replace=True, size=len(data))\n return func(bs_sample)", "def compute_bootstrapped_sample(X_table, y_table):\n n = len(X_table)\n X_sample = []\n y_sample = []\n for _ in range(n):\n rand_index = random.randrange(0, n)\n X_sample.append(X_table[rand_index])\n y_sample.append(y_table[rand_index])\n return X_sample, y_sample", "def empirical_bootstrap(self, pop_data: np.ndarray, n = None, B = 1000, func=None):\n # store the estimates for each bootstrapped sample\n n = pop_data.shape[0] if n is None else n\n boot_est = [None] * B\n index = 0\n for _ in range(B):\n idx = np.random.randint(low=0, high=n, size=n)\n est = func(pop_data[idx], axis=0)\n boot_est[index] = est\n index += 1\n \n result = {}\n result['estimates'] = boot_est\n result['est_mean'] = np.mean(boot_est)\n result['est_err'] = np.std(boot_est, ddof=1)\n \n return result", "def bootstrap(data, alpha=0.05, n_bootstrap = 2000, func=None, **func_args):\n\t\n\tassert data.ndim == 3, 'Data is not 3-dimensional. Function only works for 3-D data.' 
\n\t\n\t# Trials form the second dimension\n\tn_trials = data.shape[1]\n\t\n\t# generate randomised bootstrap resamples as random indices\n\tbootstrap_index = np.random.randint(0, n_trials, \n\t\t\t\t\t\t\t\t\t\t(n_trials, n_bootstrap) )\n\t\n\t# For each bin in the histogram, randomly samples from the results\n\t# of each trial and repeats, effectively, n_bootstrap times \n\ttrials_bootstrap = data[:, bootstrap_index, :]\n\t\n\t# dimension one is the trials, zero is the conditions; this averaging \n\t# goes across the trials creating a PSTH for each condition, and,\n\t# importantly, for each bootstrap resample\n\tavg_bootstrap = trials_bootstrap.mean(axis=1)\n\t\n\tif func:\n\t\tavg_bootstrap = func(avg_bootstrap, **func_args)\n\t\t\n\t# find percentile values for each bin along the bootstrap resamples,\n\t# which are on axis 1 \n\tCI_pos = np.percentile(avg_bootstrap, 100*(1 - (alpha/2.)), \n\t\t\t\t\t\t\t\taxis=1)\n\tCI_neg = np.percentile(avg_bootstrap, 100*(alpha/2.), \n\t\t\t\t\t\t\t\taxis=1)\n\n\n\treturn CI_pos, CI_neg", "def main():\n df = pd.read_csv('data/Boston.csv')\n n_obs = len(df)\n np.random.seed(111)\n\n # Part a\n medv_mean = np.mean(df['medv'])\n print('medv mean = {:.3f}'.format(medv_mean))\n\n # Part b\n medv_stan_err = statistics.stdev(df['medv']) / np.sqrt(n_obs)\n print('medv standard error = {:.5f}'.format(medv_stan_err))\n\n # Part c\n n_boot_iters = 10000\n medv_mean_array = np.zeros(n_boot_iters)\n for ii in range(n_boot_iters):\n ind = np.random.choice(n_obs, n_obs, replace=True)\n medv_mean_array[ii] = np.mean(df.loc[ind, 'medv'])\n\n medv_stan_err_boot = statistics.stdev(medv_mean_array)\n print('medv standard error (bootstrap) = {:.5f}'.format(medv_stan_err_boot))\n\n # Part d\n ci_95 = [medv_mean - 2 * medv_stan_err,\n medv_mean + 2 * medv_stan_err]\n ci_95_boot = [medv_mean - 2 * medv_stan_err_boot,\n medv_mean + 2 * medv_stan_err_boot]\n print('95% CI = [{:.3f}, {:.3f}]'.format(ci_95[0], ci_95[1]))\n print('95% CI (bootstrap) = [{:.3f}, {:.3f}]'.format(ci_95_boot[0], ci_95_boot[1]))\n\n # Part e\n medv_med = np.median(df['medv'])\n print('medv med = {:.3f}'.format(medv_med))\n\n # Part f\n medv_med_array = np.zeros(n_boot_iters)\n for ii in range(n_boot_iters):\n ind = np.random.choice(n_obs, n_obs, replace=True)\n medv_med_array[ii] = np.median(df.loc[ind, 'medv'])\n\n medv_med_stan_err_boot = statistics.stdev(medv_med_array)\n print('medv median standard error (bootstrap) = {:.5f}'.format(medv_med_stan_err_boot))\n\n # Part g\n medv_10 = np.percentile(df['medv'], 10)\n print('medv 10th percentile = {:.3f}'.format(medv_10))\n\n # Part f\n medv_10_array = np.zeros(n_boot_iters)\n for ii in range(n_boot_iters):\n ind = np.random.choice(n_obs, n_obs, replace=True)\n medv_10_array[ii] = np.percentile(df.loc[ind, 'medv'], 10)\n\n medv_10_stan_err_boot = statistics.stdev(medv_10_array)\n print('medv 10th percenile standard error (bootstrap) = {:.5f}'.format(medv_10_stan_err_boot))", "def get_bootstraps(self):\n col_range = range(self.response.shape[1])\n random_state = np.random.RandomState(seed=self.random_seed)\n return random_state.choice(col_range, size=(self.num_bootstraps, self.response.shape[1])).tolist()", "def bootstrapping_variance_estimation(data, iterations=100):\n bootstrapped_variance = []\n for i in tqdm(range(1)):\n data_at_index_i = [elem for elem in data]\n\n variance_estimation = []\n for _ in range(iterations):\n bootstrapped_data = []\n for _ in range(len(data_at_index_i)):\n bootstrapped_data.append(np.random.choice(data_at_index_i))\n 
variance_estimation.append(np.var(bootstrapped_data))\n\n bootstrapped_variance.append(np.mean(variance_estimation, axis=0))\n\n return bootstrapped_variance", "def bootstrap_resample(X, n=None):\r\n if n == None:\r\n n = len(X)\r\n\r\n resample_i = N.floor(N.random.rand(n)*len(X)).astype(int)\r\n X_resample = X[resample_i]\r\n return X_resample", "def bootstrap(df, nclass, if_new=False):\n ori_size = Counter(df.label)\n logger.info(f'class info before resampling: {ori_size.values()}')\n ori_size_list = list(ori_size.values())\n\n if if_new:\n df_new = pd.DataFrame(data=None, columns=df.columns)\n target_size = min(ori_size_list)\n else:\n target_size = max(ori_size_list)\n df_new = df.copy()\n\n for i in range(nclass):\n name = list(ori_size.keys())[i]\n name_index = np.array(df[df.label == name].index)\n if target_size < ori_size_list[i]:\n sample_size = target_size\n elif target_size > ori_size_list[i]:\n sample_size = target_size - ori_size_list[i]\n else:\n if if_new:\n sample_size = target_size\n else:\n sample_size = 0\n\n np.random.seed(i)\n boostrap_sample = np.random.randint(0, ori_size_list[i], sample_size)\n df_new = df_new.append(df.iloc[name_index[boostrap_sample]], ignore_index=True)\n logger.info(f'class info after resampling: {Counter(df_new.label).values()}')\n return df_new", "def bootstrap(data, iterations=10000):\n\n boot_mean = []\n\n for n in range(0, iterations):\n\n boot = resample(data, replace=True, n_samples=None,\n random_state=None)\n\n boot_mean.append(np.mean(boot))\n\n final_mean = np.mean(boot_mean)\n\n final_std = np.std(boot_mean, dtype=np.float64)\n\n return final_mean, final_std", "def calc_bootstrap(fcs,obs,ref,func, bootstrap_range, L, B):\n \n from sklearn.utils import resample\n \n idxs = np.arange(len(fcs))\n results = []\n \n random_state = 0\n for smp in range(B):\n block_sample = np.array([]).astype(int)\n while(len(block_sample) < len(fcs)):\n random_state += 1\n rolls = resample(idxs, n_samples=1, random_state=random_state)[0]\n block = np.roll(idxs, rolls)[0:L]\n block_sample = np.append(block_sample, block)\n\n block_sample = block_sample[0:len(idxs)]\n results.append(func(fcs[block_sample],obs[block_sample],ref[block_sample]))\n \n try:\n out = [ np.percentile(results, bootstrap_range[0]), \n func(fcs,obs,ref), #np.mean(results), \n np.percentile(results, bootstrap_range[1])]\n except:\n out = [ np.percentile(results, 2.5), \n func(fcs,obs,ref), #np.mean(results), \n np.percentile(results, 97.5)]\n\n # For indicating the statistical significance \n # of the lower boundary:\n if(out[0]>0): \n out.append('*')\n else:\n out.append('')\n \n return out", "def bootstrap_resample(labels):\n idxs = np.arange(len(labels))\n num_labels = max(labels) + 1\n bootstrap_idxs = np.zeros_like(idxs)\n ptr = 0\n for i in range(num_labels):\n strat = idxs[labels == i]\n bootstrap_idxs[ptr:ptr + len(strat)] = np.random.choice(strat, len(strat), replace=True)\n ptr += len(strat)\n return bootstrap_idxs" ]
[ "0.7176311", "0.71086556", "0.7089425", "0.7087817", "0.7020718", "0.68657595", "0.6752474", "0.6727999", "0.6657717", "0.63728184", "0.62784743", "0.62304413", "0.62304413", "0.61555386", "0.60460633", "0.6008815", "0.6005979", "0.6003459", "0.59947544", "0.5991532", "0.59781086", "0.5943359", "0.5922075", "0.59154767", "0.58596087", "0.5840936", "0.5814761", "0.5803637", "0.58029264", "0.5788222" ]
0.8077993
0
trace finds the line, the filename, and the error message and returns them to the user
def trace():
    import traceback
    tb = sys.exc_info()[2]
    tbinfo = traceback.format_tb(tb)[0]
    # script name + line number
    line = tbinfo.split(", ")[1]
    # Get Python syntax error
    #
    synerror = traceback.format_exc().splitlines()[-1]
    return line, __file__, synerror
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trace():\n import traceback, inspect\n tb = sys.exc_info()[2]\n tbinfo = traceback.format_tb(tb)[0]\n filename = inspect.getfile(inspect.currentframe())\n # script name + line number\n line = tbinfo.split(\", \")[1]\n # Get Python syntax error\n #\n synerror = traceback.format_exc().splitlines()[-1]\n return line, filename, synerror", "def __call__(self, line):\n marker = self.marker\n stripped_line = line.strip()\n if marker == stripped_line:\n assert not self.traceback_section\n self.traceback_section = True\n # print(\"XXX: TRACEBACK-START\")\n elif self.traceback_section:\n matched = self.file_pattern.match(line)\n if matched:\n # matched_range = matched.regs[1]\n filename = matched.groups()[0]\n new_filename = posixpath_normpath(filename)\n if new_filename != filename:\n # print(\"XXX: %r => %r\" % (filename, new_filename))\n line = line.replace(filename, new_filename)\n elif not stripped_line or line[0].isalpha():\n # -- DETECTED TRCAEBACK-END: exception-description\n # print(\"XXX: TRACEBACK-END\")\n self.traceback_section = False\n return line", "def traceback(self):", "def format_backtrace(trace):\n backtrace = []\n for filename, line, func, _ in traceback.extract_tb(trace):\n desc = {'file': filename,\n 'line': line,\n 'function': func,\n 'text': _}\n backtrace.append(desc)\n return backtrace", "def gettrace(): # real signature unknown; restored from __doc__\n pass", "def trace(context=1):\r\n return getinnerframes(sys.exc_info()[2], context)", "def trace(self, trace=...):\n ...", "def report(self, line: int, where: str, message: str):\n output = f'[line {line}] Error{where}: {message}'\n print(output, file=sys.stderr)\n self.had_error = True", "def handle_error():\n print \"An error occurred. Trace:\\n\"\n traceback.print_exc()", "def _exceptionStackTTB(self,methodName,exc,depth=10):\n stack = \"\"\n # Reconstruct the call stack from where the trace of the exception was initiated by invoking \n # Trace.error() or Trace.severe().\n stackList = traceback.extract_stack()\n try:\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n if (sourcefile.endswith(\"Trace.py\") and (function == \"error\" or function == \"severe\")): break\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n if (not stack):\n # Leave out the newline for the bottom line on the stack\n stack = \"\\t%s(%s) [%s]\" % (sourcefile,line,function)\n else:\n stack = \"\\t%s(%s) [%s]\\n%s\" % (sourcefile,line,function,stack)\n #endIf\n else:\n if (not stack):\n # Leave out the newline for the bottom line on the stack\n stack = \"\\t%s(%s) [%s] - %s\" % (sourcefile,line,function,text)\n else:\n stack = \"\\t%s(%s) [%s] - %s\\n%s\" % (sourcefile,line,function,text,stack)\n #endIf\n #endIf\n #endFor\n stack = \"\\tFrame stack (most recent call first):\\n%s\" % stack\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"\\tException getting frame stack. 
Type: %s, Value: %s\\n%s\" % (exc_type,exc_value,stack)\n #endTry\n\n try:\n tb = sys.exc_info()[2]\n stackList = traceback.extract_tb(tb,depth)\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n stack = \"\\t%s(%s) [%s]\\n%s\" % (sourcefile,line,function,stack)\n else:\n stack = \"\\t%s(%s) [%s] - %s\\n%s\" % (sourcefile,line,function,text,stack)\n #endIf\n #endFor\n stack = \"\\tException stack (most recent call first):\\n%s\" % stack\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"\\tException getting exception stack. Type: %s, Value: %s\\n%s\" % (exc_type,exc_value,stack)\n #endTry\n \n # At the very top - put the exception string\n stack = \"\\t%s\\n%s\" % (exc,stack)\n \n return stack", "def _parse_traceback(self, trace):\n p_traceback = [ \"%s:%d:in `%s'\" % (filename, lineno, funcname) \n for filename, lineno, funcname, _\n in traceback.extract_tb(trace) ]\n p_traceback.reverse()\n\n return p_traceback", "def extract_detail():\r\n tb = sys.exc_info()[-1]\r\n stk = traceback.extract_tb(tb, -1)[0]\r\n return \"{} in {} line num {} on line {} \".format(\r\n stk.name, stk.filename, stk.lineno, stk.line\r\n )", "def StandViz_ReportError( errorobj, args, Header = None ): # error reporting and traceback function\n (MyPath, MyFile) = os.path.split( args[0] ) # retrieve filename and path of running python script\n (MyBaseName, MyExt) = os.path.splitext( MyFile ) # separate basefilename from extension\n errorfilename = \"{}.txt\".format(MyBaseName) # create new error filename based on base of script filename\n ERRFILE = open( errorfilename, 'w' ) # open text file for writting\n if( Header != None ): ERRFILE.write( '%s\\n' % Header ) # if Header defined, write Header to file\n ERRFILE.write( \"Error running '{}'\\n\".format(MyFile) ) # write error message with filename\n MyTrace = errorobj[2] # retrieve error object\n while( MyTrace != None ): # loop through stack trace\n (line, file, name) = ( MyTrace.tb_lineno, MyTrace.tb_frame.f_code.co_filename, MyTrace.tb_frame.f_code.co_name ) # extract line, file, and error name\n F = open( file, 'r' ) # open source file of Python script\n L = F.readlines() # read scripot source into memory\n F.close() # close script file\n code = L[line-1].strip() # extract line of source code that caused error\n ERRFILE.write( \" File '{}', line {}, in {}\\n {}\\n\".format(file, line, name, code) ) # write filename, source code line, error name, and error code\n MyTrace = MyTrace.tb_next # step to next level of call stack trace\n ERRFILE.write( \"errorobj: {}\\n\".format(errorobj) ) # write error object and arguments for call\n ERRFILE.write( \"Calling Argument Vector: {}\\n\".format(args) ) # write calling arguments\n ERRFILE.close() # close text file with error stack trace\n os.system( \"notepad.exe {}\".format(errorfilename) ) # display error log file with notepad.exe", "def localTraceback(self, alwaysPrint = False):\n self.log( \"DEBUG TRACEBACK: \" )\n for line in traceback.format_stack():\n self.logPre( line, alwaysPrint )", "def lineno():\n return \"line \" + str(inspect.currentframe().f_back.f_lineno) + \": \"", "def _exceptionStackBTT(self,methodName,exc,depth=10):\n stack = \"\"\n # Reconstruct the call stack from where the trace of the exception was initiated by invoking \n # Trace.error() or 
Trace.severe().\n stackList = traceback.extract_stack()\n try:\n stack = \"\\tFrame stack (most recent call last):\\n\"\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n if (sourcefile.endswith(\"Trace.py\") and (function == \"error\" or function == \"severe\")): break\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n stack = \"%s\\t%s(%s) [%s]\\n\" % (stack,sourcefile,line,function)\n else:\n stack = \"%s\\t%s(%s) [%s] - %s\\n\" % (stack,sourcefile,line,function,text)\n #endIf\n #endFor\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"%s\\n\\tException getting frame stack. Type: %s, Value: %s\" % (stack,exc_type,exc_value)\n #endTry\n \n try:\n stack = \"%s\\tException stack (most recent call last):\\n\" % stack\n tb = sys.exc_info()[2]\n stackList = traceback.extract_tb(tb,depth)\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n stack = \"%s\\t%s(%s) [%s]\\n\" % (stack,sourcefile,line,function)\n else: \n stack = \"%s\\t%s(%s) [%s] - %s\\n\" % (stack,sourcefile,line,function,text)\n #endIf\n #endFor\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"%s\\tException getting exception stack. Type: %s, Value: %s\\n\" % (stack,exc_type,exc_value)\n #endTry\n\n # At the very end - put the exception string\n stack = \"%s\\t%s\" % (stack,exc)\n \n return stack", "def lineno():\n return str(' - Principal - line number: '+str(inspect.currentframe().f_back.f_lineno))", "def find_traceback_start(self):\n ### FILL IN ###", "def lineno():\n linenum = inspect.currentframe().f_back.f_lineno\n frameinfo = inspect.getframeinfo(inspect.currentframe())\n filename = frameinfo.filename\n return str(\"File: \" + str(filename) + \" Line: \" + str(linenum))", "def tidy_error(ex=None) -> str:\r\n from sys import exc_info\r\n from os.path import join, abspath, dirname\r\n from traceback import extract_tb, format_list, format_exception_only\r\n\r\n show = join(dirname(abspath(__file__)), '')\r\n\r\n def _check_file(name):\r\n return name and name.startswith(show)\r\n\r\n def _print(typ, value, tb): # If not debug, generator expression: filter trace to my files.\r\n show = extract_tb(tb) if DEBUG else (fs for fs in extract_tb(tb, limit=3) if _check_file(fs.filename))\r\n fmt = format_list(show) + format_exception_only(typ, value)\r\n return ''.join((f.strip('\"\\'').replace('\\\\n', '') for f in fmt))\r\n\r\n args = ex or exc_info()\r\n return _print(*args)", "def lineno():\n return str(' - SecurityGroupIngressPortRangeRule - caller: '+str(inspect.stack()[1][3])+' - line number: '+str(inspect.currentframe().f_back.f_lineno))", "def tb():\n etype, value, tb = sys.exc_info()\n return \"%s: %s (%s@%s:%d)\" % (etype.__name__, value, tb.tb_frame.f_code.co_name, os.path.basename(tb.tb_frame.f_code.co_filename), tb.tb_lineno)", "def exceptionTraceback(self, alwaysPrint = False):\n self.logPre( traceback.format_exc(), alwaysPrint )", "def trace_function(frame, event, arg):\n co = frame.f_code\n func_name = co.co_name\n if func_name == 'write':\n # Ignore write() calls from print statements\n return\n filename = co.co_filename\n if event == 'call':\n # 
decend into the stack...\n return trace_function\n elif event == 'return':\n if isinstance(arg, basestring) and 'inputlocator' in filename.lower() and not func_name.startswith('_'):\n results_set.add((func_name, arg))\n # print('%s => %s' % (func_name, arg))\n return", "def debug(line):\n sys.stderr.write(line + \"\\n\")\n sys.stderr.flush()", "def trace(string):\n if trace_enabled:\n print(string)", "def lineno():\n return str(' - RDSInstanceMasterUserPasswordRule- caller: '+str(inspect.stack()[1][3])+' - line number: '+str(inspect.currentframe().f_back.f_lineno))", "def error(*args, noContext: bool=True, showLineNumber: bool=True, **kwargs)->None:\n pass", "def _debug_calc_error(self, line):\n debug(\"RPN Calculator Error: %s\" % line)", "def _print_caller(self):\n import traceback\n print '\\n'.join(['%s:%d %s'%(f,l,c) for f,l,m,c in traceback.extract_stack()])" ]
[ "0.7392801", "0.7135186", "0.6529018", "0.63906014", "0.6296253", "0.6159555", "0.6074369", "0.6055162", "0.6053701", "0.60512197", "0.60511047", "0.60449284", "0.6025194", "0.5981327", "0.5965538", "0.592666", "0.5902943", "0.5831507", "0.580618", "0.5780158", "0.57784814", "0.57647014", "0.5753568", "0.5750067", "0.5749855", "0.5743602", "0.57275677", "0.57223684", "0.57033956", "0.57029104" ]
0.7310826
1
In this function, you will instantiate your classes several times, given the data provided. Then, you will open "sh_additional_info.csv" and, for each line in that file, perform an operation using one of the methods of one of your classes. Follow the commented instructions in this main() function. Refer to Problem Set 07 README.md for instructions and tips.
def main():
    # Refer to Problem Set 07 README.md for instructions and tips.

    # 6.1: Read in < sh_basic_info.csv >
    basic_info = read_csv_file('sh_basic_info.csv')

    # 6.2: Create instances of < SuperHeroine >
    heroines = {}
    for hero in basic_info:
        heroines[hero['name']] = SuperHeroine(hero['name'], hero['full_name'], hero['team'],
                                              hero['eye_color'], hero['hair_color'], hero['base'])
    print(heroines)

    # 6.3: Read in < sh_additional_info.csv >
    additional_info = read_csv_file('sh_additional_info.csv')

    # 6.4: Add powers and nemesis
    for row in additional_info:
        name = row["Heroine Name"]
        instance_affected = heroines[name]
        how_affected = row["Category"]
        value = row['Value']
        if how_affected == 'power':
            instance_affected.add_power(value)
        else:
            instance_affected.add_nemesis(value)

    # 6.5: Write to file
    write_to_file('storm.txt', heroines['Storm'])
    write_to_file('scarlet_witch.txt', heroines['Scarlet Witch'])
    write_to_file('jessica_jones.txt', heroines['Jessica Jones'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n raw_data = pd.read_csv('data/raw_hospital_data.csv')\n\n fe_data = new_features(raw_data)\n fe_data = compressing_admission_type(data)\n fe_data = age_to_cat(fe_data)\n fe_data = compressing_careunit(fe_data)\n fe_data = compressing_curr_serv(fe_data)\n fe_data = compressing_ethnicity(fe_data)\n fe_data = compressing_marital_status(fe_data)\n fe_data = compressing_religion(fe_data)\n fe_data = compressing_admit_location(fe_data)\n fe_data = compress_icd9_codes(fe_data)\n\n fe_data.to_csv('data/feature_engineering_data.csv')", "def __init__(self):\n self.project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n self.excel_file = os.path.join(self.project_dir, \"data\", \"Literature_Data.xlsx\")\n self.spreadsheet_name = \"Individualized Data\"\n self.filled_output_file = os.path.join(self.project_dir, \"data\", \"filled_data.csv\")\n self.output_file = os.path.join(self.project_dir, \"data\", \"final.csv\")\n self.use_fake_data = False # For testing\n # This instance value \"self.df\" is the pandas DataFrame that contains all of the data\n # from the literature case studies. Manipulating this field is the purpose of this class.\n \n self.num_negative = 500\n self.df = None", "def main():\n try:\n\n logger = settings.get_logger(__name__)\n\n # if not (check_for_data()):\n # raise Exception(\"The following files were not all found: %s\"%(\"files\")) \n\n logger.info(\"*** Import data from raw files ***\")\n #load raw file\n\n logger.info(\"Load raw data file (Huge file, please be patient)...\")\n p1c1File = \"histo_7cerf_p1c1.txt\"\n df_histo_p2c1_jour = pd.read_csv(settings.raw_path + p1c1File, sep = \",\", encoding = 'utf-8', header = None,dtype={0:str,2:str,3:str}).fillna(0)\n\n #prepare sales dataframe\n logger.info(\"Droping uneccessary columns...\")\n sales_df= df_histo_p2c1_jour.drop([1,3,4,5,6],axis=1)\n\n #set headers\n logger.info(\"Setting headers info...\")\n end_date = \"01-14-2019\"\n columns = settings.row_headers\n nb_days = len(sales_df.columns) - len(columns)\n date_range = pd.date_range(end = end_date,periods = nb_days, freq='1w').strftime(\"%d/%m/%Y\")\n columns.extend(date_range)\n sales_df.columns = columns\n\n #drop Client 0\n sales_df = sales_df[sales_df[\"Client\"]!=0]\n\n #Get p1c1 keys\n p1c1 = sales_df[[\"Product\",\"Client\"]].dropna().drop_duplicates().astype(str).copy()\n\n #Product table\n logger.info(\"Loading products descriptions...\")\n product_df = get_product_df(\"product_7cerf.txt\")\n #save product season mapping\n save_product_season(product_df)\n\n #Get keys table from previous files\n p1c1p2 = p1c1.join(product_df[[\"Key_lvl2\"]],on =[\"Product\"]).dropna().set_index([\"Product\"]).astype(str)\n\n\n #save sales history\n save_p2_sales(sales_df,p1c1p2)\n \n #Get client talbe\n logger.info(\"Loading clients descriptions...\")\n client_df = get_clients_df(\"client_7cerf.txt\",columns =[\"Store Level\",\"Business Area\"] )\n cli_features = p1c1p2.join(client_df,on=\"Client\",how=\"left\").drop([\"Client\"],axis=1)\n\n \n\n #Calculate store counts\n logger.info(\"Saving store counts file...\")\n save_storecounts(cli_features,p1c1p2)\n\n \n\n #Client counts by p2\n logger.info(\"Saving clients count by product...\")\n save_clients_count(p1c1p2)\n # return True\n except Exception as err:\n print(err)\n logger.error(err)\n # return False", "def main():\n # openfile allows for CSV files with stored data of two columns\n # data = openfile(\"filename\")\n data = get_data()\n abtest = AB_test(data)\n abtest.stats()\n 
abtest.print_stats()", "def main():\n # Load in original data\n origin_data = pd.read_csv('/Users/apple/Desktop/CSE_163/cse163_project/'\n + 'Admission_Predict_Ver1.1.csv',\n sep=r'\\s*,\\s*', header=0, encoding='ascii',\n engine='python')\n\n # Research question 1\n lasso_regression(origin_data)\n\n # Research question 2\n # We drop the 'Serial No.' column because it is unrelated to our analysis.\n df = origin_data.drop(columns=['Serial No.'])\n find_correlation(df)\n boxplots_testscores_vs_admission(df)\n\n # Research question 3\n university_rating_analysis(origin_data)", "def handle(self, *args, **options):\n try:\n # csv path argument\n fpath = options['csv_path']\n # checks if file is csv\n if fpath.lower().endswith('csv'):\n encoding = get_encoding(fpath)\n with open(fpath, encoding=encoding) as file:\n reader = csv.reader(file, delimiter=\",\")\n # iterating through each row in csv\n for row in reader:\n try:\n create_instance(row[0].strip(), row[1].strip(),\n row[2].strip())\n except IntegrityError:\n # duplicate information\n self.stderr.write(self.style.WARNING(\n \"(Warning) \"\n \"Duplicate Information: {}\".format(row)))\n except IndexError:\n # problem extracting missing information\n self.stderr.write(self.style.WARNING(\n \"(Warning) Incorrect Format: {}\".format(row)))\n except ValueError:\n # missing information\n self.stderr.write(self.style.WARNING(\n \"(Warning) Missing Value: {}\".format(row)))\n except ObjectDoesNotExist:\n # queried object does not exist\n self.stderr.write(self.style.WARNING(\n \"(Warning) \"\n \"ObjectDoesNotExist: {}\".format(row)))\n else:\n # file not csv\n self.stderr.write(self.style.ERROR(\"(Error) \"\n \"File not of type csv.\"))\n except FileNotFoundError:\n # file path does not exist\n self.stderr.write(self.style.ERROR(\"(Error) File does not exist.\"))\n except IOError:\n # error reading file\n self.stderr.write(self.style.ERROR(\"(Error) Error reading file.\"))", "def main():\n scores_file = open(\"scores.csv\")\n scores_data = scores_file.readlines()\n print(scores_data)\n subjects = scores_data[0].strip().split(\",\")\n score_values = []\n for score_line in scores_data[1:]:\n score_strings = score_line.strip().split(\",\")\n score_numbers = [int(value) for value in score_strings]\n score_values.append(score_numbers)\n scores_file.close()\n scores_by_subjects = reorganise_score(score_values)\n subject_details(scores_by_subjects, subjects)", "def run():\n options = [\"Add\", \"Remove\", \"Update\", \"Oldest person\", \"Persons closest to average\"]\n common_options = [\"Name: \", \"Year: \"]\n file = \"model/hr/persons.csv\"\n title_list = [\"Id\", \"Name\", \"Year\"]\n choice = None\n dont_clear = False\n while choice != '0':\n if not dont_clear:\n os.system(\"clear\")\n table = data_manager.get_table_from_file(file)\n terminal_view.print_table(table, title_list)\n choice = terminal_view.get_choice_submenu(options)\n dont_clear = False\n if choice == '1':\n common.add(file, common_options)\n elif choice == '2':\n common.remove(file)\n elif choice == '3':\n common.update(file, common_options)\n elif choice == '4':\n terminal_view.print_result(hr.get_oldest_person(table), \"Oldest persons:\\n\")\n dont_clear = True\n elif choice == '5':\n msg = \"Persons with age closest to average:\\n\"\n terminal_view.print_result(hr.get_persons_closest_to_average(table), msg)\n dont_clear = True\n else:\n terminal_view.print_error_message(\"There is no such choice.\")", "def main():\n filename = \"data/exercise.csv\"\n analyze(filename)", "def 
main():\n\n #get the csv file into a data-frame\n universities_df = pd.read_csv('universities_data.csv', encoding = 'utf-8-sig')\n universities_names_list = universities_df['name'].tolist()\n\n #get list of university objects\n url = 'http://universities.hipolabs.com/search?country=Israel'\n api_universities = Get_universities(url)\n list_of_universities = api_universities.get_universities_info()\n\n #to see if we got new entities or not for exporting to csv later..\n is_new_entities = False\n\n for university in list_of_universities:\n if university.name not in universities_names_list:\n is_new_entities = True\n universities_df= universities_df.append(pd.DataFrame({\n 'alpha_two_code': [university.alpha_two_code], \n 'country': [university.country],\n 'web_pages': [str(university.web_pages)],\n 'domains': [str(university.domains)],\n 'name': [university.name],\n 'state_province':[str(university.state_province)]}) , ignore_index = True)\n\n #export back to csv if true\n if is_new_entities: \n print('we got new entities!') \n universities_df.to_csv('universities_data.csv', encoding = 'utf-8-sig', index = False)\n else:print('no new universities for now!')", "def main():\n with open('csv_files/products.csv', 'a') as data_file:\n # Move to the next line before appending new row to the file\n data_file.write(\"\\n\")\n data_writer = csv.writer(data_file)\n for i in range(5, 10000):\n data_writer.writerow([str(i+1), \" description{}\".format(str(i)),\n \" type{}\".format(str(i)),\n \" {}\".format(str(random.randint(1, 100)))])\n\n with open('csv_files/customers.csv', 'a') as data_file:\n # Move to the next line before appending new row to the file\n data_file.write(\"\\n\")\n data_writer = csv.writer(data_file)\n for i in range(5, 10000):\n data_writer.writerow([str(i+1), \" first_name{}\".format(str(i)),\n \" last_name{}\".format(str(i)),\n \" address{}\".format(str(i)),\n \" phone_number{}\".format(str(i)),\n \" email{}\".format(str(i))])", "def main():\n path_for_data = '/Users/avielshtern/Desktop/semb/iml/IML.HUJI-master/data/kc_house_data (1).csv'\n design_matrix, response_vector = load_data(path_for_data)\n putting_it_all_together_1(design_matrix, response_vector)\n putting_it_all_together_2(design_matrix, response_vector)\n feature_evaluation(design_matrix, response_vector)", "def main():\n\n dataframes = importing(['admissions_data', 'patient_data',\n 'diagnoses_icd_data', 'services_data',\n 'icustays'])\n merged_data = merging_data(dataframes)\n cleaned = data_cleaning(merged_data)\n\n cleaned.to_csv('raw_hospital_data.csv')", "def __init__(self, args):\n self.verbose = args.verbose\n self.force = args.force\n self.extra = args.extra\n self.master_csv = args.master\n self.new_files = args.new_files\n self.df_mas_lab_data = None # Master Lab data\n self.df_new_lab_data = None # Aggregated new Lab data\n self.columns = [\n \"CLIA\",\n \"FACILITY_TYPE\",\n \"CERTIFICATE_TYPE\",\n \"LAB_NAME\",\n \"STREET\",\n \"CITY\",\n \"STATE\",\n \"ZIP\",\n \"PHONE\",\n ]", "def main():\n now = time.strftime('%Y%m%d%H%M%S')\n\n # info = get_info(now)\n # info_filename = 'info_' + now + '.csv'\n # info.to_csv(os.path.join('..', '..', 'data', 'raw', info_filename), index=False)\n\n questions = get_questions(now)\n\n # don't talk about all this detail in the talk", "def main():\n\n csv_file = \"shortlist.csv\"\n team_count = 0\n participant_count = 0\n\n\n #Delete all existing teams and participants from the database.\n Team.objects.all().delete()\n Participant.objects.all().delete()\n\n with 
open(csv_file) as f:\n reader = csv.reader(f)\n data = [row for row in reader]\n\n for item in data:\n if item[0]:\n team_count += 1\n\n t = Team.objects.create(\n name=item[0].strip(),\n idea=item[30].strip()\n )\n\n no_of_p = int(item[1])\n print item[1]\n participant_count += no_of_p\n\n p1 = Participant.objects.create(\n participant_id=get_pid(),\n name=item[2].strip() + \" \" + item[3].strip(),\n gender=item[4].strip(),\n college=item[7].strip(),\n email=item[5].strip(),\n phone=str(item[6]),\n team=t\n )\n\n p2 = Participant.objects.create(\n participant_id=get_pid(),\n name=item[11].strip() + \" \" +item[12].strip(),\n gender=item[13].strip(),\n college=item[16].strip(),\n email=item[14].strip(),\n phone=str(item[15]),\n team=t\n )\n\n if no_of_p == 3:\n p3 = Participant.objects.create(\n participant_id=get_pid(),\n name=item[20].strip() + \" \" +item[21].strip(),\n college=item[25].strip(),\n gender=item[22].strip(),\n email=item[23].strip(),\n phone=str(item[24]),\n team=t\n )\n\n print \"{} teams and {} participants imported.\".format(team_count,\n participant_count)", "def __init__(self):\n self.file_name = 'data.csv'\n # Column of interest\n self._col = ['product_name', 'url', 'quantity', 'packaging']\n self._col += ['brands', 'origins', 'countries_fr', 'allergens']\n self._col += ['traces_fr', 'additives_n', 'additives_fr']\n self._col += ['nutrition_grade_fr', 'categories_fr']\n self._col += ['main_category_fr']\n\n # Check if the csv is already in the file\n try:\n with open(self.file_name, 'r'):\n pass\n except FileNotFoundError:\n CsvAnalysis.download_file()\n finally:\n # Read the csv file, and create a dataframe\n self.food_cat = pandas.read_csv(self.file_name,\n sep=\"\\t\",\n low_memory=False,\n usecols=self._col,\n encoding=\"utf8\")\n\n # Remove countries which aren't France\n mask = self.food_cat['countries_fr']\n self.food_cat = self.food_cat[mask == 'France']\n\n # Delete column countries_fr\n del self.food_cat['countries_fr']\n\n # Remove empty row countries_fr from dataframe\n columns = ['main_category_fr', 'product_name', 'nutrition_grade_fr']\n for column in columns:\n self.food_cat = self.food_cat[~self.food_cat[column].isnull()]\n\n # Remove empty row from product_name\n self.food_cat.sort_values(by='categories_fr')\n\n # Select the last value from categories_fr\n # to use it as a subcategory\n col = 'categories_fr'\n self.food_cat[col] = self.food_cat[col].str.split(',').str.get(-1)\n self.food_cat.sort_values(by='categories_fr')", "def __init__(self):\r\n self.filter_p_number = 3 # First one with enough data for statistics\r\n self.prfs_d = extract_settings_elvis()\r\n\r\n ccds = True\r\n filtered = False\r\n scamp = False\r\n\r\n input_df = read_csv('cats/cat_clean_ssos.csv', index_col=0)\r\n filt_cat = self.gets_filtered_catalog() # Gets data from filtered\r\n\r\n if ccds:\r\n cats_d = self.extract_cats()\r\n self.extract_stats_ccds(cats_d, input_df, filt_cat)\r\n elif filtered:\r\n self.extract_stats_filt(filt_cat, input_df)\r\n elif scamp:\r\n pass\r\n # self.extract_stats_scamp(input_df)\r\n else:\r\n pass", "def main():\n draft_class = 56\n from_csv = 'smjhl-2020-09-10.csv'\n to_csv = 'S' + str(draft_class) + '-bmi.csv'\n\n\n full_data = pd.read_csv(f\"../{from_csv}\")\n draft_class_data = full_data.loc[full_data['Draft Class Numeric'] == draft_class].copy()\n height_weight_raw = draft_class_data[[\"First Name\", \"Last Name\", \"Height\", \"Weight\"]]\n\n bmi_chart = bmi_magic(height_weight_raw)\n bmi_chart = bmi_chart.sort_values(by=[\"BMI\"], 
axis=0, ascending=False)\n\n # print(full_data)\n print(bmi_chart)\n bmi_chart.to_csv(to_csv)", "def main():\n\n # Read the CSV and get its content\n jobOfferList, professionsList = usefulFunctions.readCsv()\n \n # Create an empty output tab with the right number of lines and columns\n finalTab = usefulFunctions.createEmpty(jobOfferList, professionsList)\n \n # Fill the tab\n finalTab = usefulFunctions.fillTabExceptTotals(jobOfferList, professionsList, finalTab)\n \n # Update the totals \n finalTab = usefulFunctions.fillTotals(finalTab)\n \n print(\"\\nTable des métiers par profession et type de contrat : \")\n for line in finalTab:\n print(line)", "def part1():\n print('=== Starting Part 1 ===')\n data = pd.read_csv(DATA)\n\n print('Number of species:', hw2_pandas.species_count(data))\n print('Highest level pokemon:', hw2_pandas.max_level(data))\n print('Low-level Pokemon', hw2_pandas.filter_range(data, 1, 9))\n print('Average attack for fire types',\n hw2_pandas.mean_attack_for_type(data, 'fire'))\n print('Count of each Pokemon type:')\n print(hw2_pandas.count_types(data))\n print('Highest stage for each Pokemon type')\n print(hw2_pandas.highest_stage_per_type(data))\n print('Average attack for each Pokemon type')\n print(hw2_pandas.mean_attack_per_type(data))", "def test_add_data():\n add_furniture(\"invoice_file.csv\", \"Elisa Miles\", \"LR04\", \"Leather Sofa\", 25.00)\n add_furniture(\"invoice_file.csv\", \"Edward Data\", \"KT78\", \"Kitchen Table\", 10.00)\n add_furniture(\"invoice_file.csv\", \"Alex Gonzales\", \"BR02\", \"Queen Mattress\", 17.00)", "def main():\n s = content.DataFiles()\n \n date_list = generate.get_list_dates(2016, 2016, 500)\n prod_list = list(s.get_collist_by_name(os.path.join(content.data_fldr,'food','garden_produce.csv'), 'name')[0])\n \n tbl_cust = generate.TableGenerator(8, ['STRING','PEOPLE', 'PEOPLE', 'PLACE'], ['Customer ID', 'First Name', 'Surname', 'Country'])\n tbl_cust.save_table('customers.csv')\n cust_list = list(s.get_collist_by_name('customers.csv', 'Customer ID')[0])\n \n tbl_sales = generate.TableGenerator(25, [date_list, cust_list, prod_list, 'CURRENCY'], ['Date of sale', 'Customer ID', 'Product', 'Amount'])\n tbl_sales.save_table('sales.csv')", "def main():\n\n # start at loading the dataset\n data = h1bdata_loading()\n merged_data = pd.concat([data[year] for year in range(2010,2017)], ignore_index= True)\n raw_data = h1b_data(data)\n \n \n\n # Then clean the data\n #h1b_data = Clean_df(raw_data)\n #print(\"data cleaned >>>\")\n\n\n while True:\n try:\n print (\"================================ H1b Visa Approve Rate Exploring ================================\")\n print (\"\")\n print (\" How do you want to explore the H1b Data? 
\")\n print (\" <a> : Overview \t\t \")\n print (\" <b> : Location \")\n print (\" <c> : Industry \")\n print (\" <d> : Company \") \n print (\" You can always input 'quit' to leave the system \")\n print (\"=================================================================================================\")\n\n key = option_input()\n if key == 'a':\n overview(data)\n if key == 'b':\n location(data)\n if key == 'c':\n industry_exploring(merged_data)\n if key == 'd':\n company_exploring(merged_data)\n except wrong_option_exception:\n print (\"Invalid option, please reselect.\")", "def main(argv):\n # Question 1\n # Saves the features given in a list\n features = (argv[2].split(sep=\", \"))\n the_data = data.load_data(argv[1], features)\n statistic_functions = [sum, mean, median]\n # Saves the relevant records\n summer_data, not_summer = data.filter_by_feature(the_data, \"season\", [1])\n holiday_data, not_holiday = data.filter_by_feature(the_data, \"is_holiday\", [1])\n print(\"Question 1:\")\n print(\"Summer:\")\n data.print_details(summer_data, [\"hum\", \"t1\", \"cnt\"], statistic_functions)\n print(\"Holiday:\")\n data.print_details(holiday_data, [\"hum\", \"t1\", \"cnt\"], statistic_functions)\n print(\"All:\")\n data.print_details(the_data, [\"hum\", \"t1\", \"cnt\"], statistic_functions)\n\n # Question 2\n print(\"\\nQuestion 2\")\n print(\"If t1<=13.0, then:\")\n # Saves the relevant records\n winter_data, not_winter = data.filter_by_feature(the_data, \"season\", [3])\n w_h_data, not_w_h_data = data.filter_by_feature(winter_data, \"is_holiday\", [1])\n population_statistics(\"Winter holiday records:\", w_h_data, \"t1\", [\"cnt\"], THRESHOLD, 0, statistic_functions[1:])\n population_statistics(\"Winter weekday records:\", not_w_h_data, \"t1\", [\"cnt\"], THRESHOLD, 0, statistic_functions[1:])\n print(\"If t1>13.0, then:\")\n population_statistics(\"Winter holiday records:\", w_h_data, \"t1\", [\"cnt\"], THRESHOLD, 1, statistic_functions[1:])\n population_statistics(\"Winter weekday records:\", not_w_h_data, \"t1\", [\"cnt\"], THRESHOLD, 1, statistic_functions[1:])", "def main():\n print(time.time())\n data = '../datasets/between_phase/clean_df.csv'\n print(\"Process Beginning\")\n print(\"Reading Clean CSV\")\n clean_df = pd.read_csv(data, dtype={\"journey_pattern_id\": str})\n print(clean_df.shape)\n base_table = Base_Table(clean_df)\n print(\"Adding datetime\")\n base_table.add_datetime()\n print(\"Adding Day\")\n base_table.add_day()\n print(\"Adding Hour\")\n base_table.add_hour()\n print(\"Adding Time Bin\")\n base_table.add_time_bin()\n print(\"Adding Weekend Boolean\")\n base_table.add_weekend()\n print(\"Adding Distance\")\n base_table.add_distance_feature()\n print(\"Updating Stop Id\")\n base_table.add_nearest_stop_distance()\n print(\"Filtering Data\")\n base_table.remove_null_stops()\n print(\"Adding Travel Time\")\n base_table.add_travel_time()\n print(\"Adding Congestion\")\n base_table.congestion_feature()\n bs = base_table.get_df()\n bs.to_csv('../datasets/output_files/base_table.csv')\n return bs", "def load(cls):\n \n # Loop through procedures and build patient procedure lists:\n procs = csv.reader(file(PROCEDURES_FILE,'U'),dialect='excel-tab')\n header = procs.next() \n for proc in procs:\n cls(dict(zip(header,proc))) # Create a procedure instance ", "def main():\n data = pd.read_csv('countries.csv')\n # import_data_pandas(data)\n # continent_data(data)\n # continent_data_le(data)\n continent_data_gdp_growth(data)", "def main(raw_filepath, interim_filepath, 
processed_filepath):\n raw_filepath = Path(raw_filepath)\n interim_filepath = Path(interim_filepath)\n processed_filepath = Path(processed_filepath)\n\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n\n years = ['2010', '2011', '2012', '2013', '2014']\n\n #############################################################\n ################ Life Expectancy Outcome ####################\n #############################################################\n\n le_birth = pd.read_csv(raw_filepath / 'US_A.csv',\n usecols=['Tract ID', 'e(0)'],\n dtype={'Tract ID': \"object\"}) \\\n .rename(columns={'Tract ID': 't10_cen_uid_u_2010'}) \\\n .set_index('t10_cen_uid_u_2010')\n\n le_other = pd.read_csv(raw_filepath / 'US_B.csv',\n usecols=['Tract ID', 'Age Group', 'e(x)'],\n dtype={'Tract ID': \"object\"}) \\\n .rename(columns={'Tract ID': 't10_cen_uid_u_2010'}) \\\n .set_index(['t10_cen_uid_u_2010', 'Age Group']) \\\n .sort_index() \\\n .loc[(slice(None), ['15-24', '35-44', '55-64']), :] \\\n .unstack() \\\n .reindex(le_birth.index) # use the same tracts for all experiments\n\n le_other.columns = ['e(20)', 'e(40)', 'e(60)']\n\n # le_birth.to_csv(processed_filepath / 'y_00.csv', header=True)\n # le_other['e(20)'].to_csv(processed_filepath / 'y_20.csv', header=True)\n # le_other['e(40)'].to_csv(processed_filepath / 'y_40.csv', header=True)\n # le_other['e(60)'].to_csv(processed_filepath / 'y_60.csv', header=True)\n\n\n ##############################################################\n ################## Priority Dataset ##########################\n ##############################################################\n\n with open(raw_filepath / 'T10_Priority_Wide_Interpolated.csv', 'r') as f:\n cols = f.readline().strip().split(',')\n\n proj_cols = [x for x in cols if x[-4:] in years]# and\n # get all the priority NETS columns for later\n net_cols = ['t10_cen_uid_u_2010'] + [x[:11] + '_d_' + x[14:] for x in cols if '_net_' in x]\n\n data_X = pd.read_csv(raw_filepath / 'T10_Priority_Wide_Interpolated.csv', usecols=proj_cols,\n dtype={'t10_cen_uid_u_2010': \"object\"}) \\\n .set_index('t10_cen_uid_u_2010')\n\n # Create % younger than 25 (this method is far less than ideal)\n ag25up = data_X.filter(regex='.*(_pop_c_|ag25up).*')\n ag25up_coltuples = [(x[:-4], x[-4:]) for x in ag25up.columns]\n ag25up.columns = pd.MultiIndex.from_tuples(ag25up_coltuples)\n ag25up_long = ag25up.stack()\n ag25dwn_p = ((ag25up_long['t10_ldb_pop_c_'] - ag25up_long['t10_ldb_ag25up_c_'])\n / ag25up_long['t10_ldb_pop_c_']).unstack()\n ag25dwn_p.columns = ['t10_ldb_ag25dwn_p_' + x for x in ag25dwn_p.columns]\n\n # Create % older than 65\n ag65up = data_X.filter(regex='.*(_pop_c_|a60up).*')\n ag65up_coltuples = [(x[:-4], x[-4:]) for x in ag65up.columns]\n ag65up.columns = pd.MultiIndex.from_tuples(ag65up_coltuples)\n ag65up_long = ag65up.stack()\n ag65up_p = (ag65up_long['t10_ldb_a60up_c_'] / ag65up_long['t10_ldb_pop_c_']) \\\n .unstack()\n ag65up_p.columns = ['t10_ldb_ag60up_p_' + x for x in ag65up_p.columns]\n\n # Add our new measure\n data_X = pd.concat([data_X, ag25dwn_p, ag65up_p], axis=1)\n\n # Get rid of all count variables, including nets\n no_count_cols = [x for x in data_X.columns if '_c_' not in x]\n data_X = data_X[no_count_cols]\n\n\n drop_cols = ['t10_gis_area_l_2010',\n 'm10_cen_uid_u_2010',\n 'm10_cen_memi_x_2010',\n 'c10_cen_uid_u_2010',\n 'z10_cen_uid_u_2010']\n\n data_X = data_X.drop(columns=drop_cols) \\\n .reindex(le_birth.index)\n\n data_X.columns = pd.Index([(x[:-5], int(x[-4:])) for x 
in data_X.columns])\n\n X_priority = data_X.groupby(axis=1, level=0).mean()\n X_priority.to_csv(interim_filepath / 'X_priority.csv')\n\n ###########################################################\n #################### NETS Dataset #########################\n ###########################################################\n\n X_nets_allyrs = pd.read_csv(raw_filepath / 'recvd_t10_vars_v8_20190607.csv', usecols=net_cols,\n dtype={'t10_cen_uid_u_2010': \"object\"}) \\\n .set_index('t10_cen_uid_u_2010') \\\n .reindex(le_birth.index)\n\n X_nets_allyrs.columns = pd.Index([(x[:-5], int(x[-4:])) for x in X_nets_allyrs.columns])\n X_nets = X_nets_allyrs.groupby(axis=1, level=0).mean()\n X_nets.to_csv(interim_filepath / 'X_nets.csv')\n\n # Split predictive data by Variable Set\n X_all = pd.concat([X_priority, X_nets], axis=1) \\\n .dropna(how='any')\n\n final_index = le_birth.index.intersection(X_all.index)\n X_all = X_all.reindex(final_index)\n le_birth = le_birth.reindex(final_index)\n le_other = le_other.reindex(final_index)\n\n le_birth.to_csv(processed_filepath / 'y_00.csv', header=True)\n le_other['e(20)'].to_csv(processed_filepath / 'y_20.csv', header=True)\n le_other['e(40)'].to_csv(processed_filepath / 'y_40.csv', header=True)\n le_other['e(60)'].to_csv(processed_filepath / 'y_60.csv', header=True)\n\n # Var Set 1\n p1_features = ['t10_ldb_hinci_m',\n 't10_ldb_pop_d',\n 't10_ldb_nhblk_p',\n 't10_ldb_hisp_p',\n 't10_ldb_col_p']\n X_p1 = X_all[p1_features]\n X_p1.to_csv(processed_filepath / 'X_varGroup1.csv')\n\n # Var Set 2\n p2_features = [\n \"t10_ldb_hinci_m\",\n \"t10_ldb_pop_d\",\n \"t10_ldb_ag25dwn_p\",\n \"t10_ldb_ag60up_p\",\n \"t10_ldb_nhblk_p\",\n \"t10_ldb_hisp_p\",\n \"t10_ldb_col_p\",\n \"t10_ldb_lep_p\",\n \"t10_ldb_mrenti_m\",\n \"t10_ldb_multi_p\",\n \"t10_ldb_nhwht_p\",\n \"t10_ldb_asian_p\",\n \"t10_ldb_fb_p\",\n \"t10_ldb_hs_p\",\n \"t10_ldb_unemp_p\",\n \"t10_ldb_npov_p\",\n \"t10_ldb_vac_p\",\n \"t10_ldb_own_p\",\n \"t10_ldb_mhmvali_m\"\n ]\n X_p2 = X_all[p2_features]\n X_p2.to_csv(processed_filepath / 'X_varGroup2.csv')\n\n # Var Set 3\n X_p3 = X_nets.reindex(final_index)\n X_p3.to_csv(processed_filepath / 'X_varGroup3.csv')\n\n # Var Set 4\n X_p4 = X_all\n X_p4.to_csv(processed_filepath / 'X_varGroup4.csv')", "def __init__(self, file_path):\n # will raise an error if the path is invalid, we don't need an\n # if statement here\n df = pandas.read_excel(file_path)\n\n \"\"\"\n read in the cities using a dictionary comprehension\n dictionary = { key: value for elem in iterable }\n In this case we are reading in the name of the city as the key\n and its corresponding CityLocation object as the value. We\n have made the assumption that each city has a unique name.\n \"\"\"\n #\n self.lulu = [Order(row[1][\"Date\"], row[1][\"Order Number\"],\n row[1][\"Brand\"], row[1][\"Garment\"],\n row[1][\"Count\"], row[1][\"Style name\"])\n for row in df.iterrows()\n if row[1][\"Brand\"] == \"Lululime\"]\n self.lulu = LululimeFactory()\n GarmentMaker(self.lulu)\n # brand = self.lulu[0].brand\n # garment = self.lulu[0].garment\n # print(brand)\n # print(garment)\n\n lulu_order = ((row[1][\"Date\"], row[1][\"Garment\"],\n row[1][\"Brand\"], row[1][\"Garment\"])\n for row in df.iterrows() if\n row[1][\"Brand\"] == \"Lululime\")\n for item in lulu_order:\n print(item)\n\n lulu_order = LululimeFactory()\n # test = GarmentMaker(lulu_order)\n # print(test)\n\n # for lulu in self.lulubrand:\n # print(lulu)\n # print(*self.lulubrand)" ]
[ "0.6210108", "0.60456717", "0.60032433", "0.5947413", "0.5876277", "0.58508295", "0.58461136", "0.5834677", "0.57999545", "0.57364744", "0.5730089", "0.57000756", "0.5691697", "0.5639754", "0.5626317", "0.56223726", "0.55977935", "0.55886185", "0.5551414", "0.55287653", "0.5524884", "0.55187", "0.5513985", "0.55032635", "0.5498475", "0.54731965", "0.5472796", "0.5467737", "0.54560685", "0.54558706" ]
0.6953013
0
Updates the x, y (memory-shared) coordinates with the actual mouse position at a given frequency.
def stream(bus, address, frequency, x, y, stop_trigger):
    mouse = Mouse.list_connected(bus=bus, address=address)[0]
    delay = 1. / frequency
    while not stop_trigger:
        x1, y1 = mouse.get_position_change()
        x.value += x1
        y.value += y1
        time.sleep(delay)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def follow(self):\n\t\tpos = pygame.mouse.get_pos()\n\t\tself.x = pos[0]\n\t\tself.y = pos[1]\n\t\tself.draw()", "def mouse_position_event(self, x: int, y: int):\n pass", "def update_pointer(self):\n pointer_length = -self.pointer_frac * self.radius\n # Add pi/2 to the angle because we consider 0 radians to be pi/2 in standard position.\n x = pointer_length * math.cos(self._radians + math.pi / 2)\n y = pointer_length * math.sin(self._radians + math.pi / 2)\n self.coords(self.pointer, 0, 0, x, y)", "def update(self):\n # Get the current mouse position. This returns the position\n # as a list of two numbers.\n pos = pygame.mouse.get_pos()\n \n # Set the player x position to the mouse x position\n self.rect.x = pos[0]", "def on_mouse_motion(self, x, y, delta_x, delta_y):\r\n pass", "def update(self):\n # Get the current mouse position. This returns the position\n # as a list of two numbers.\n pos = pygame.mouse.get_pos()\n\n # Set the player x position to the mouse x position\n self.rect.x = pos[0]", "def update(self):\r\n self.x = games.mouse.x\r\n self.y = games.mouse.y\r\n self.check_collide()", "def update(self):\n self.x = games.mouse.x\n #self.y = games.mouse.y\n self.check_collide()", "def set_position(self, updated):\n self.buff_x = updated[0]\n self.buff_y = updated[1]", "def update(self):\n self.x = games.mouse.x\n self.y = games.mouse.y\n self.check_collide()", "def handle_mouse(self, x, y):\n self.x = x\n self.y = y\n global _pending_handle_mouse\n if not _pending_handle_mouse:\n _pending_handle_mouse = True\n if self.fig.document is not None:\n self.fig.document.add_timeout_callback(self.handle_mouse_callback, 100)\n else:\n self.handle_mouse_callback()", "def mousePosition(self):", "def mousePositionRaw(self):", "def mousePositionRaw(self):", "def handle_mouse(self, x, y):\n pass", "def on_mouse_motion(self, x, y, delta_x, delta_y):\n\n print(x)\n print(y)\n print(delta_x)\n print(delta_y)\n\n\n #self.manage_crosshair()\n \n \n\n #self.crosshair_sprite.center_x += delta_x\n #self.crosshair_sprite.center_y += delta_y\n\n\n self.crosshair_relative_xoffset += delta_x\n self.crosshair_relative_yoffset += delta_y", "def update(self):\n\n\t\tself.x = games.mouse.x\n\t\tself.y = games.mouse.y\n\t\tself.check_collide()", "def __master_cursor_pos_callback(self, glfw_window, xpos, ypos):\n # flip glfw window space to match OGL space(like texture that has bottom left origin)\n ypos = self.window.glyph.size[1] - ypos\n\n # update values\n self.__pos_instant = Vec(xpos, ypos, 0)\n self.__accel = self.__pos_instant - self.__pos_prev\n self.__pos_prev = self.__pos_instant\n\n # call registered callbacks\n self.call_cursor_pos_callback(glfw_window, *self.__pos_instant.xy, mouse=self)", "def update(self, delta_time):\r\n #for pixels in self.pixel:\r\n for line in self.cursor:\r\n line.draw()\r\n \r\n self.check_keys()", "def update(self):\r\n self.x = 60\r\n self.y = games.mouse.y\r\n self.check_collide()", "def on_mouse_motion(self, x, y, delta_x, delta_y):\n \n pass", "def on_mouse_move(self, event: PointEvent):\n self.x = event.x\n self.y = event.y\n self.handle_mouse(self.x, self.y)", "def xy(self, xy_position):\n print(f\"xy: {xy_position}\")\n self.device_control.xy = xy_position\n yield", "def OnMouseMotion(self, evt):\n if evt.Dragging() and evt.LeftIsDown():\n self.lastx, self.lasty = self.x, self.y\n self.x, self.y = evt.GetPosition()\n self.Refresh(False)", "def handle_mouse(self, x, y):\n # we are in aperture mode\n if self.aperture_id:\n if self.aperture_id not in 
self.aperture_model.aperture_models.keys():\n pass\n model = self.aperture_model.aperture_models[self.aperture_id]\n location = model.source.data['location'][0]\n\n if self.mode == 'width':\n width = abs(location - x)\n model.update_values(start=location - width,\n end=location + width)\n elif self.mode == 'left':\n if x < location:\n model.update_values(start=x)\n elif self.mode == 'right':\n if x > location:\n model.update_values(end=x)\n elif self.mode == 'location':\n diff = x - location\n model.update_values(location=x,\n start=model.source.data['start'][0] + diff,\n end=model.source.data['end'][0] + diff)\n\n self.last_x = x\n self.last_y = y\n return False", "def on_mouse_move(self, event):\n self.mouse = [event.xdata, event.ydata]\n\n # Update pan view on mouse move\n if self.panning is True:\n for a in self.pan_axes:\n a.drag_pan(1, event.key, event.x, event.y)\n\n # Async re-draw (redraws only on thread idle state, uses timer on backend)\n self.canvas.draw_idle()\n\n ##### Temporary place-holder for cached update #####\n self.update_screen_request.emit([0, 0, 0, 0, 0])", "def update_tempLists(self):\n self.current_position = self.mediaPlayer.position()\n\n # I add the current value, calculates its index, and removes it. This method is used to know which index the pointer is at.\n bisect.insort(self.xValues,self.current_position)\n self.position_index = self.xValues.index(self.current_position)\n self.xValues.remove(self.current_position)\n\n n = 120\n if self.position_index < n: \n self.tempXList = self.xValues[:self.position_index + n]\n self.tempYList = self.yValues[:self.position_index + n]\n self.tempCList = self.colors[:self.position_index + n]\n else:\n self.tempXList = self.xValues[self.position_index - n :self.position_index + n]\n self.tempYList = self.yValues[self.position_index - n :self.position_index + n]\n self.tempCList = self.colors[self.position_index - n :self.position_index + n]", "def handle_motion(self, x, y):\n if self.pressed_flag:\n self.last_point = (x, y)\n\n # trigger canvas to redraw itself\n self.redraw()", "def move_to_point(self, destination, frequency):\r\n # index 0 represents x, index 1 represents y\r\n if frequency > 1:\r\n x_towards = int(round((destination[0] - current_position[0]) / frequency))\r\n if x_towards > 1:\r\n x_towards -= 1\r\n y_towards = int(round((destination[1] - current_position[1]) / frequency))\r\n if y_towards > 1:\r\n y_towards -= 1\r\n current_position = list(self.current_position)\r\n if current_position[0] < destination[0]: # if x is west\r\n current_position[0] = current_position[0] + x_towards\r\n x_change = int(frequency)\r\n if current_position[0] >= destination[0]: # if this overshoots:\r\n current_position[0] = destination[0]\r\n elif current_position[0] == destination[0]:\r\n pass # don't move\r\n else:\r\n current_position[0] = current_position[0] - x_towards\r\n x_change = 0 - int(frequency)\r\n if current_position[0] <= destination[0]: # if this overshoots:\r\n current_position[0] = destination[0]\r\n if current_position[1] < destination[1]:\r\n current_position[1] = current_position[1] + y_towards\r\n y_change = int(frequency)\r\n if current_position[1] >= destination[1]: # if this overshoots:\r\n current_position[1] = destination[1]\r\n elif current_position[1] == destination[1]:\r\n pass\r\n else:\r\n current_position[1] = current_position[1] - y_towards\r\n y_change = 0 - int(frequency)\r\n if current_position[1] <= destination[1]: # if this overshoots:\r\n current_position[1] = destination[1]\r\n\r\n 
current_position = tuple(current_position)\r\n self.move_to(current_position)\r\n for i in list(range(1, int(frequency))):\r\n human_avoidance_list.append((current_position[0] + x_change, current_position[1] + y_change))\r\n self.current_position = current_position\r\n if current_position[0] == destination[0] and current_position[1] == destination[1]:\r\n self.resource_check = 1\r\n else:\r\n self.resource_frequency += self.resource_frequency / 6 # 6 * 5 days in a step = 30 days in a month\r\n # resource frequency is listed monthly in the source file\r\n # this indicates time passing each step until the gatherer can move\r", "def track_point(self, flags, x, y):\n ret_val, x.value, y.value = self._track_point(flags, x.value, y.value)\n return ret_val" ]
[ "0.5839249", "0.57032055", "0.5696799", "0.56878513", "0.5680848", "0.56477594", "0.5643513", "0.56162447", "0.5613641", "0.56051666", "0.558602", "0.55699193", "0.5541605", "0.5541605", "0.5534065", "0.5533534", "0.5528136", "0.5512954", "0.5495194", "0.54931223", "0.548661", "0.5484529", "0.5472189", "0.5459849", "0.54550296", "0.54440284", "0.5431041", "0.5381656", "0.53808516", "0.53771603" ]
0.6736627
0
Returns the focal length of the telescope.
def focal_length(self):
    return self.f * self.diameter
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def estimate_focal_length(self):\n fl = (self.fiber_diameter / 2) / np.tan(np.deg2rad(self.fov / 2))\n\n return fl", "def length(self) -> ir.FloatingValue:\n return ops.GeoLength(self).to_expr()", "def bspb_focalLength():\n shotCam = pm.PyNode('shot_cam').getShape()\n return str(shotCam.focalLength.get())", "def getLength(self):\n return self.geometry.length", "def getLength(self) -> float:\n return self.length", "def get_length(self) -> int:\n return (self.pivot_departure_fix.timestamp - self.pivot_arrival_fix.timestamp).total_seconds()", "def focallengthFromFOV(self, view_x=None, view_y=None): # pragma: no cover\n # to be overloaded by the child class.\n return 0", "def focal_point(self):\n return self._focal_point", "def length(self):\n return _property_op(arctern.ST_Length, self)", "def length(self) -> float:\n n = self.geodesic.extrinsicDimension()\n third = 1.0/3.0\n def distance(x,y):\n cp0 = x[:n]\n cp1 = self.geodesic.integrate(cp0,vectorops.mul(x[n:],third))\n cp3 = y[:n]\n cp2 = self.geodesic.integrate(cp3,vectorops.mul(y[n:],-third))\n return self.geodesic.distance(cp0,cp1) + self.geodesic.distance(cp1,cp2) + self.geodesic.distance(cp2,cp3)\n return Trajectory.length(self,distance)", "def obs_length(self):\n return self.lc.time[-1] - self.lc.time[0]", "def getVocalized(self,):\n\t\treturn self.vocalized;", "def get_length(self):\n\n return self.length", "def auxiliary_trail_length(self):\n return self.attributes[\"_aux_length\"]", "def _get_length(self):\n return self._length", "def total_length(self):\n # YOUR CODE HERE\n return abs(self.radius*self.angle)", "def getLength(self):\n return self.length", "def getLength(self):\n return self.sideLength", "def time_length(self):\n return self._time_length", "def get_length(self):\n return self._length", "def get_length(self):\n return self._length", "def __len__(self):\n return len(self.focals)", "def getLength(self):\n flength = 0\n for quad in self._quadrilaterals:\n flength = flength + get_quad_length(quad)\n return flength", "def get_length(self):\n\n return self._length", "def _get_length(self):\n from math import sqrt\n\n if self._length is None:\n sum1 = 0\n for a in self.diff:\n sum1 += a * a\n self._length = sqrt(sum1)\n return self._length", "def length(self):\n self.convert_window(\"Length\", \"meters\", [\"Scandinavian mile\", \"angstroms\", \"au\", \"barleycorns\", \"cables\", \"centimeters\", \"chains\", \"decimeters\", \"ells\", \"ems\", \"fathoms\", \"feet(UK & US)\", \"feet(US survey)\", \"furlongs\", \"hands\", \"hectometers\", \"inches\", \"kilometers\", \"links\", \"light years\", \"meters\", \"micrometers\", \"mil\", \"miles(UK & US)\", \"miles(nautical, UK)\", \"miles(nautical, international)\", \"millimeters\", \"nanometers\", \"parsecs\", \"pica\", \"picometers\", \"rods\", \"spans\", \"thou\", \"yards\"])", "def length(self):\n return self.get_delta_value(self.Z_INDEX)", "def length(self) -> float:\n return pos.distance(self.start, self.end)", "def get_length(self):\n if(type(self._length) != float):\n self._logger.write(\"Error! length must be of type float\")\n elif(self._length == None):\n self._logger.write(\"Error! length contains no value\")\n else:\n try:\n return self._length\n except Exception as e:\n self._logger.write(\"Error! Could not fetch the value of length: \\n %s\" % e)", "def length(self):\n return math.sqrt(self.x**2 + self.y**2 + self.z**2)" ]
[ "0.7237808", "0.6786899", "0.6465912", "0.63727885", "0.6352939", "0.62659967", "0.61646885", "0.616444", "0.61160403", "0.6020924", "0.5934602", "0.5932981", "0.59197545", "0.5891779", "0.588042", "0.58679926", "0.584611", "0.58384174", "0.58230686", "0.5822641", "0.5822641", "0.5792508", "0.5785025", "0.57838994", "0.5760595", "0.5746466", "0.57397634", "0.57205796", "0.571677", "0.5704886" ]
0.8241378
0
Returns the plate scale as an `~astropy.units.Quantity`.
def plate_scale(self):
    return 206265 * uu.arcsec / (self.diameter.to('mm') * self.f)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scale(self):\n return self.scale_factor / CONSTANTS.AU", "def getScale(self):\n return _libsbml.Unit_getScale(self)", "def scale(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"scale\")", "def scale(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"scale\")", "def plate_scale(platescale):\n if platescale.unit.is_equivalent(si.arcsec / si.m):\n platescale_val = platescale.to_value(si.radian / si.m)\n elif platescale.unit.is_equivalent(si.m / si.arcsec):\n platescale_val = (1 / platescale).to_value(si.radian / si.m)\n else:\n raise UnitsError(\"The pixel scale must be in angle/distance or distance/angle\")\n\n return Equivalency(\n [(si.m, si.radian, lambda d: d * platescale_val, lambda a: a / platescale_val)],\n \"plate_scale\",\n {\"platescale\": platescale},\n )", "def scale(self):\n return self._gev_bijector.scale", "def scale(self):\n return self._scale", "def GetScale(self):\n ...", "def getnscale(self):\n return self.nscale", "def get_scale():\r\n\r\n \r\n return 0.5", "def scale(self):\n return self.distribution.scale", "def get_scale(units, compartmentId, volume, extracellularVolume):\r\n if compartmentId == 'c':\r\n V = volume\r\n else:\r\n V = extracellularVolume\r\n\r\n if units == 'uM':\r\n return 1. / N_AVOGADRO / V * 1e6\r\n elif units == 'mM':\r\n return 1. / N_AVOGADRO / V * 1e3\r\n elif units == 'molecules':\r\n return 1.\r\n else:\r\n raise Exception('Invalid units \"%s\"' % units)", "def castSize(self, scale):\n return self.camera.sensorSize * scale", "def getScale(self):\n \n dag_node = OpenMaya.MFnDagNode(self.thisObj)\n transform_node = OpenMaya.MFnTransform(dag_node.parent( 0 ))\n \n util = OpenMaya.MScriptUtil()\n util.createFromDouble(0.0, 0.0, 0.0)\n pointeur = util.asDoublePtr()\n transform_node.getScale(pointeur)\n \n sx = util.getDoubleArrayItem(pointeur, 0)\n sy = util.getDoubleArrayItem(pointeur, 1)\n sz = util.getDoubleArrayItem(pointeur, 2)\n\n return sx, sy, sz", "def scale(self):\n return self._a", "def getScale(self):\n return self.factor**self.turnOn", "def scale(self) -> Tuple[float, float]:\n return self._scale", "def scaling(self):\n return self.__scaling", "def get_scale(self):\n\n if not hasattr(self, \"scale\"):\n raise NotImplementedError(\"\"\"All end-use subclasses of Color should define\n a get_scale method.\"\"\")\n\n return self.scale", "def scaling(self):\n return self._scaling", "def scaling(self):\n return self._scaling", "def scale(self) -> Optional[pulumi.Input['ScaleArgs']]:\n return pulumi.get(self, \"scale\")", "def temperature_scale(self) -> str:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"temperature_scale\"))\r\n return self._temperature_scale", "def scale_value(self):\n return self._scale_value[2]", "def scale_to_factor(scale):\n return (B.pi / 2) / (2 * scale**2)", "def scale_parameter(self):\n return self._scale_parameter", "def factor_to_scale(factor):\n return 1 / B.sqrt(4 * factor / B.pi)", "def scaling(self):\n return self.stacked._box_scaling[1]", "def get_scale(self):\r\n try: return self.scale[0], self.scale[1], self.scale[2]\r\n except: return self.scale, self.scale, self.scale", "def _scale(x):\n scaleFactor = 1\n _ret = int(x/scaleFactor)\n return _ret" ]
[ "0.7407171", "0.73354304", "0.7272574", "0.7260109", "0.72580636", "0.71984255", "0.7091628", "0.7008195", "0.69788766", "0.6805889", "0.6759866", "0.67273027", "0.67187494", "0.6713132", "0.6710965", "0.6695409", "0.6635227", "0.6599181", "0.65680814", "0.6560833", "0.6560833", "0.6483989", "0.6483741", "0.64718527", "0.64370936", "0.6424952", "0.6417758", "0.63952535", "0.63668525", "0.6326123" ]
0.7882506
0
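
For context on the plate-scale pair above: the formula is the small-angle relation, 206265 arcsec per radian divided by the focal length (aperture diameter times focal ratio), which yields arcsec per unit of detector length. Below is a minimal sketch of the same computation, assuming `uu` stands for `astropy.units` and using made-up telescope numbers (nothing here comes from the record itself):

```python
# Minimal sketch of the plate-scale formula from the record above.
# Assumes `uu` is astropy.units; the diameter and focal ratio are hypothetical.
import astropy.units as uu

diameter = 200 * uu.mm  # hypothetical aperture diameter
f_ratio = 10            # hypothetical focal ratio (dimensionless)

# 206265 arcsec per radian divided by the focal length in mm -> arcsec / mm.
plate_scale = 206265 * uu.arcsec / (diameter.to('mm') * f_ratio)
print(plate_scale)  # 103.1325 arcsec / mm
```

The result carries arcsec / mm units, i.e. it comes back as the `astropy.units.Quantity` promised by the query string.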
Identifies genes that are significantly enriched for insertions (CTGs). This function takes a DataFrame of insertions, coming from multiple samples, and identifies if any genes are more frequently affected by an insertion than would be expected by chance. These genes are called Commonly Targeted Genes (CTGs). CTGs are selected by comparing the number of insertions within the gene to the number of insertions that would be expected from the background insertion rate, which is modeled using a Poisson distribution.
def test_ctgs(
        insertions,  # type: List[Insertion]
        reference,  # type: Reference
        gene_ids=None,  # type: Set[str]
        chromosomes=None,  # type: Set[str]
        pattern=None,  # type: str
        per_sample=True,  # type: bool
        window=None  #type: Tuple[int, int]
):
    # Default to shared chromosome sequences (typically drops some
    # of the more esoteric extra scaffold/patch sequences).
    if chromosomes is None:
        reference_seq = pyfaidx.Fasta(str(reference.fasta_path))
        reference_gtf = GtfIterator(reference.indexed_gtf_path)

        chromosomes = list(
            set(reference_seq.keys()) & set(reference_gtf.contigs))

        if len(chromosomes) == 0:
            ValueError('No chromosomes are shared between the reference '
                       'sequence and reference gtf files')

    if len(chromosomes) == 0:
        raise ValueError('At least one chromosome must be given')

    # Determine gene windows using GTF.
    logging.info('Generating gene windows')
    gene_windows = _build_gene_windows(
        reference.indexed_gtf_path, window=window, chromosomes=chromosomes)

    # Subset insertions to gene intervals.
    insertions = _subset_to_windows(insertions, gene_windows)

    if gene_ids is None:
        gene_ids = set(ins.metadata['gene_id'] for ins in insertions)

    # Collapse insertions per gene/sample (recommended).
    # Corrects for hopping/multiple detection issues.
    if per_sample:
        logging.info('Collapsing insertions')
        insertions = list(_collapse_per_sample(insertions))

    # Calculate total number of pattern occurrences within intervals.
    logging.info('Counting pattern occurrences')
    reference_seq = pyfaidx.Fasta(str(reference.fasta_path))

    total = count_total(
        reference_seq, pattern=pattern, intervals=gene_windows.values())

    # Calculate p-values for each gene.
    logging.info('Calculating significance for genes')
    insertion_trees = GenomicIntervalTree.from_objects_position(
        insertions, chrom_attr='seqname')

    p_values = {
        gene_id: test_region(
            insertions=insertions,
            reference_seq=reference_seq,
            region=gene_windows[gene_id],
            total=total,
            pattern=pattern,
            filters=[lambda ins, gid=gene_id: ins.metadata['gene_id'] == gid],
            insertion_trees=insertion_trees)
        for gene_id in gene_ids
    }

    # Build result frame.
    result = pd.DataFrame.from_records(
        iter(p_values.items()), columns=['gene_id', 'p_value'])

    # Calculate corrected p-value using bonferroni correction.
    result['q_value'] = (result['p_value'] * len(result)).clip_upper(1.0)

    # Sort by q-value and p-value.
    result.sort_values(by=['q_value', 'p_value'], inplace=True)

    if len(insertions) > 0:
        # Annotate with gene_name if possible.
        if 'gene_name' in insertions[0].metadata:
            name_map = {
                ins.metadata['gene_id']: ins.metadata['gene_name']
                for ins in insertions
            }
            result.insert(1, 'gene_name', result['gene_id'].map(name_map))
        else:
            result['gene_name'] = np.nan

        # Annotate with frequency.
        frequency = (Insertion.to_frame(insertions)
                     .groupby('gene_id')['sample'].nunique()
                     .reset_index(name='n_samples'))
        result = pd.merge(result, frequency, on='gene_id', how='left')
    else:
        result['gene_name'] = np.nan
        result['n_samples'] = np.nan

    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def genes_GT():\n df1=pd.read_csv(config['geneInfo'], sep=\" \")\n df1=df1[df1.chr == '22']\n df2=pd.read_csv(config['counts'], sep=\" \")\n genes=df1.merge(df2.gene_id, on=\"gene_id\")\n return list(set(genes['gene_id']))", "def process_cgc(path, return_dataframe=False, fusions=False):\n # read in data\n df = pd.read_table(path)\n\n # keep small somatic variants\n if not fusions:\n s = df['Mutation Types']\n is_small = s.str.contains('Mis|F|N|S').fillna(False)\n is_somatic = ~df['Tumour Types(Somatic)'].isnull()\n df = df[is_small & is_somatic].copy()\n\n # label oncogenes / TSG\n df['Is Oncogene (CGC)'] = 'No'\n df.loc[df['Role in Cancer'].fillna('').str.contains('oncogene'), 'Is Oncogene'] = 'Yes'\n df['Is Tumor Suppressor Gene (CGC)'] = 'No'\n df.loc[df['Role in Cancer'].fillna('').str.contains('TSG'), 'Is Tumor Suppressor Gene'] = 'Yes'\n df['Is Driver Gene (CGC)'] = 'Yes'\n\n # rename columns\n df = df.rename(columns={'Entrez GeneId': 'Entrez Gene ID', 'Gene Symbol': 'Hugo Symbol'})\n\n # get gene names\n if not return_dataframe:\n cgc_genes = df['Gene Symbol'].tolist()\n else:\n cgc_genes = df\n\n return cgc_genes\n else:\n # return fusion gene information\n has_fus_partner = ~df['Translocation Partner'].isnull()\n output_list = []\n for ix, row in df[has_fus_partner].iterrows():\n g1 = row[\"Gene Symbol\"]\n for g2 in row['Translocation Partner'].split(', '):\n output_list.append([g1, g2])\n output_df = pd.DataFrame(output_list, columns=[\"Gene1\", \"Gene2\"])\n output_df['GENE_ID'] = output_df['Gene1'] + '--' + output_df['Gene2']\n\n if not return_dataframe:\n cgc_genes = list(set(output_df[\"Gene1\"].unique()) | set(output_df[\"Gene2\"]))\n else:\n cgc_genes = output_df\n\n return cgc_genes", "def check_chromosomes(fasta_chromosomes, gtf_chromosomes):\n fasta_unique = fasta_chromosomes - gtf_chromosomes\n gtf_unique = gtf_chromosomes - fasta_chromosomes\n if fasta_unique:\n logger.warning((\n 'The following chromosomes were found in the FASTA but doens\\'t have '\n 'any \"transcript\" features in the GTF: {}. '\n 'No sequences will be generated for these chromosomes.'\n ).format(', '.join(fasta_unique)))\n if gtf_unique:\n logger.warning((\n 'The following chromosomes were found to have \"transcript\" features '\n 'in the GTF but doens\\'t exist in the FASTA. '\n 'No sequences will be generated for these chromosomes.'\n ).format(', '.join(fasta_unique)))\n chromosomes = set.intersection(fasta_chromosomes, gtf_chromosomes)\n\n return chromosomes", "def gene_finder(dna, threshold):\n\n # YOUR IMPLEMENTATION HERE", "def check_gene_coverage(sequence_records, check_for_overlap=True):\n length_total = 0\n gene_length_total = 0\n total_length_by_feature = defaultdict(lambda: 0)\n for sequence_record in sequence_records:\n length_total += len(sequence_record.seq)\n for gene in sequence_record.features:\n gene_length_total += gene.location.end.position - gene.location.start.position\n # this section tries to keep track of subfeature types\n for feature in gene.sub_features:\n total_length_by_feature[feature.type] += len(feature)\n for subfeature in feature.sub_features:\n total_length_by_feature[subfeature.type] += len(subfeature)\n gene_coverage_fraction = float(gene_length_total)/length_total\n feature_coverage_fractions = [(feature,float(length)/gene_length_total) for feature,length \n in total_length_by_feature.items()]\n\n # TODO the by-feature coverage doesn't work because I'm only parsing the file for genes, not features!!! 
If I want to parse for features, I need to split things up into multiple passes etc again...\n #print total_length_by_feature\n\n # Check for overlapping genes and print a warning, since overlapping genes will make the measurement inaccurate\n if check_for_overlap:\n if check_for_overlapping_genes(sequence_record):\n print \"WARNING: There are overlapping genes! %% of length covered by genes may not be accurate.\"\n # MAYBE-TODO actually adjust the measurement for overlapping genes? Nah, too much work, not enough need for now.\n\n return gene_coverage_fraction, feature_coverage_fractions", "def findInteractions( targetGenes, geneTable ):\n pass", "def stats_gene(df):\n # res.write(f'Gene,Total,total[%],SGT\\n')\n taxa_count = len(df)\n df = df.sum().to_frame()\n df = df.rename(columns={0: 'Number of Taxa'})\n df[f'Percent of Total Taxa (out of {taxa_count})'] = round((df['Number of Taxa'] / taxa_count) * 100, 2)\n df = df.rename_axis('Gene Name')\n df = df.sort_values(by=['Number of Taxa'], ascending=False)\n df['SGT'] = ['yes'] * len(df)\n df.to_csv(f'{output_fold}/gene_stats.tsv', sep='\\t')", "def routine():\n genes = g.genes\n gene_db = db['ncbi_gene_docs']\n for gene in genes:\n count = gene_db.count({\"gene_id\": gene})\n if count is not 1:\n logger.debug(\"FixMe: {0};\\tCount: {1}\".format(gene, count))", "def simulate_generations(self, generations=DEFAULT_GENERATIONS):\n for i in range(generations):\n logging.getLogger().debug(self)\n self.__simulate_generation()\n\n if i < generations - 1:\n self.__delete_duplicates()\n\n return self.fittest_chromosome", "def per_gene_coverage(genes,df):\n\n sub_genes =[]\n\n #For every gene in the list, check the average coverage, if less than 100 add it to the final list.\n for gene in genes:\n coverage = average(df[df['GeneSymbol;Accession'] == gene]['percentage30'])\n\n if coverage < 100:\n sub_genes.append([gene.split(';')[0],round(coverage,2)])\n \n return sub_genes", "def matched_gc_bedfile(bedfile, matchfile, genome, number, size=None, min_bin_size=100):\n g = Genome(genome)\n genome_fa = g.filename\n try:\n fa = Fasta(matchfile)\n gc = [\n (seq.upper().count(\"C\") + seq.upper().count(\"G\")) / len(seq)\n for seq in fa.seqs\n ]\n sizes = [len(seq) for seq in fa.seqs]\n except Exception:\n try:\n # pylint: disable=unexpected-keyword-arg\n fields = pd.read_csv(matchfile, comment=\"#\", nrows=10, sep=\"\\t\").shape[1]\n tmp = (\n pybedtools.BedTool(matchfile).filter(lambda x: len(x) >= 10).saveas().fn\n )\n bed = pybedtools.BedTool(tmp)\n gc = np.array(\n [float(x[fields + 1]) for x in bed.nucleotide_content(fi=genome_fa)]\n )\n sizes = np.array([x.length for x in bed])\n gc = [round(x, 2) for x in gc]\n except Exception:\n logger.error(\"Please provide input file in BED or FASTA format\")\n raise\n\n # Get the median size of the sequences\n if size is None or size == 0:\n size = int(np.median(sizes))\n if np.std(sizes) > size * 0.05:\n logger.info(\"Sequences do not seem to be of equal size.\")\n logger.info(\n f\"GC% matched sequences of the median size ({size}) will be created\"\n )\n\n bins = [(0.0, 0.2), (0.8, 1)]\n for b in np.arange(0.2, 0.799, 0.05):\n bins.append((b, b + 0.05))\n\n fraction = number / len(gc)\n gc = np.array(gc)\n # print(\"GC\", gc)\n bin_count = []\n for b_start, b_end in bins:\n bin_count.append(\n int(np.sum((gc > round(b_start, 2)) & (gc <= round(b_end, 2))) * fraction)\n )\n\n # To make te requested number, divide remaining over\n # all bins that have counts\n rest = number - sum(bin_count)\n i = 0\n for _ in 
range(rest):\n while bin_count[i % len(bins)] == 0:\n i += 1\n bin_count[i % len(bins)] += 1\n i += 1\n\n nseqs = max(bin_count) * len(bins)\n\n with NamedTemporaryFile(delete=False) as tmp:\n gc_bin_bedfile(\n tmp.name,\n genome,\n nseqs,\n length=size,\n bins=bins,\n random_state=None,\n min_bin_size=min_bin_size,\n )\n df = pd.read_csv(tmp.name, sep=\"\\t\", names=[\"chrom\", \"start\", \"end\", \"bin\"])\n # print(tmp.name)\n with open(bedfile, \"w\") as f:\n pass\n with open(bedfile, \"a\") as f:\n for (b_start, b_end), n in zip(bins, bin_count):\n if n == 0:\n continue\n # print(b_start, b_end, n)\n b = f\"{b_start:.2f}-{b_end:.2f}\"\n df.loc[df[\"bin\"] == b, [\"chrom\", \"start\", \"end\"]].sample(n).to_csv(\n f, sep=\"\\t\", header=False, index=False\n )", "def get_GO_presence_labels(genes_of_interest, min_GO_size=200, max_GO_size=300):\n genes = pd.Series(genes_of_interest)\n go_group_presence = {}\n\n for GO in go2geneIDs:\n gene_ids = go2geneIDs[GO]\n\n # boolean vector (length is num of genes in embedding)\n in_go_group_vector = genes.isin(gene_ids)\n\n if (in_go_group_vector.sum() > min_GO_size) & (in_go_group_vector.sum() < max_GO_size):\n go_group_presence[GO] = in_go_group_vector\n\n result = pd.DataFrame(go_group_presence)\n result.index = genes\n result.index.name = 'entrezgene'\n return result", "def check_multi_exon(tr_nc_index_dict, ncdf):\n\n\tfor gene in tr_nc_index_dict:\n\t\n\t\ttempdf = ncdf.iloc[tr_nc_index_dict[gene][0]:tr_nc_index_dict[gene][1]]\n\t\texon_count = 0\n\t\t\n\t\tfor i in tempdf.index:\n\t\t\tif tempdf.loc[i,'feature'] == 'exon':\n\t\t\t\texon_count += 1\n\t# print exon_count\n\t\tif exon_count >1 :\n\t\t\tprint \" more than one exon for %s\" % gene\n\t\t\tsys.exit()\t# prevent writing fasta if there is multi exon transcript", "def find_entropy(less_than_threshold,more_than_threshold):\n\n ''' Storing total number of records '''\n total_records = len(less_than_threshold) + len(more_than_threshold)\n\n ''' Calculating the probability '''\n less_than_probability = len(less_than_threshold) / total_records\n more_than_probability = len(more_than_threshold) / total_records\n\n ''' Converting the dataframe to numpy arrays '''\n less_than_threshold_values = less_than_threshold.values\n more_than_threshold_values = more_than_threshold.values\n\n ''' Storing the target attribute values (Muffin or Cupcake) for threshold values '''\n target_for_less_than = less_than_threshold_values[:, -1]\n target_for_more_than = more_than_threshold_values[:, -1]\n\n ''' Finding the counts of muffin and cupcake for values lower than and greater than threshold value '''\n recipe_type, less_than_cupcake_muffin_count = np.unique(target_for_less_than, return_counts=True)\n recipe_type, more_than_cupcake_muffin_count = np.unique(target_for_more_than, return_counts=True)\n\n # print(recipe_type, more_than_cupcake_muffin_count, len(more_than_cupcake_muffin_count))\n ''' To ensure there are at least 5 records in each node '''\n if less_than_cupcake_muffin_count.sum() < 5 or more_than_cupcake_muffin_count.sum() < 5:\n ''' Return horrible badness '''\n return math.inf\n else:\n ''' Find the entropies for less than threshold values and more than threshold values '''\n less_than_entropy = sum((less_than_cupcake_muffin_count / less_than_cupcake_muffin_count.sum()) * - np.log2(\n less_than_cupcake_muffin_count / less_than_cupcake_muffin_count.sum()))\n more_than_entropy = sum((more_than_cupcake_muffin_count / more_than_cupcake_muffin_count.sum()) * - np.log2(\n 
more_than_cupcake_muffin_count / more_than_cupcake_muffin_count.sum()))\n\n ''' Calculate the total weighted entropy '''\n total_weighted_entropy = less_than_probability * less_than_entropy + more_than_probability * more_than_entropy\n\n return total_weighted_entropy", "def find_common_genes(input_fp):\n trait_genes = {}\n all_genes = []\n common_genes = []\n snp_count = {}\n traits = {}\n matrix = []\n print('Extracting genes from eQTL interactions for...')\n _,_,t_files = next(os.walk(input_fp), (None, None, []))\n for trait_file in t_files:\n trait = trait_file[:len(trait_file)-4]\n print('\\t' + trait)\n tfile = open(os.path.join(input_fp, trait_file), 'r')\n eqtls= csv.reader(tfile, delimiter = '\\t') \n next(tfile, None)\n for line in eqtls:\n genes = []\n if trait in trait_genes.keys():\n genes = trait_genes[trait]\n genes.append(line[3])\n trait_genes[trait] = genes\n all_genes.append(line[3])\n tfile.close()\n \n for trait in trait_genes:\n trait_genes[trait] = list(set(trait_genes[trait]))\n all_genes = list(set(all_genes))\n print(len(all_genes))\n\n done_genes = []\n \"\"\"\n for snp in all_snps:\n occur = all_snps.count(snp)\n if occur > 1 and snp not in done_snps:\n done_snps.append(snp)\n for record in trait_snps:\n if snp == record[1] and record not in common_snps:\n common_snps.append(record)\n snp_count[snp] = occur\n to_dict = []\n if record[0] not in traits.keys():\n to_dict.append(snp)\n traits[record[0]] = to_dict\n else:\n to_dict = traits[record[0]]\n to_dict.append(snp)\n traits[record[0]] = to_dict\n \"\"\"\n for trait in trait_genes.keys():\n gene_count = {}\n genes_total = len(trait_genes[trait])\n compare_traits = trait_genes.keys()\n if genes_total > 3:\n for trait_gene in trait_genes[trait]:\n for compare in compare_traits:\n if trait_gene in trait_genes[compare]:\n if compare not in gene_count.keys():\n gene_count[compare] = 1\n else:\n gene_count[compare] += 1\n #else:\n # gene_count[compare] = 0\n row = []\n row.append(trait)\n for t in gene_count:\n ratio = round(gene_count[t]/float(genes_total), 7)\n matrix.append([trait, t, genes_total, gene_count[t], ratio])\n\n \"\"\"\n with open (output_fp + '/' + 'common_snps_count.txt', 'wb') as cluster_file:\n writer = csv.writer(cluster_file, delimiter = '\\t')\n writer.writerow(['snp', 'count'])\n for snp in snp_count:\n writer.writerow([snp,snp_count[snp]])\n \"\"\"\n\n with open ('gene_matrix.txt', 'w') as cluster_file:\n writer = csv.writer(cluster_file, delimiter = '\\t')\n writer.writerow(['trait_x', 'trait_y', '#total_genes', '#common_snps', \\\n 'ratio'])\n writer.writerows(matrix)", "def coxen_single_drug_gene_selection(\n source_data,\n target_data,\n drug_response_data,\n drug_response_col,\n tumor_col,\n prediction_power_measure=\"pearson\",\n num_predictive_gene=100,\n generalization_power_measure=\"ccc\",\n num_generalizable_gene=50,\n multi_drug_mode=False,\n):\n\n if isinstance(drug_response_col, str):\n drug_response_col = np.where(drug_response_data.columns == drug_response_col)[\n 0\n ][0]\n\n if isinstance(tumor_col, str):\n tumor_col = np.where(drug_response_data.columns == tumor_col)[0][0]\n\n drug_response_data = drug_response_data.copy()\n drug_response_data = drug_response_data.iloc[\n np.where(np.isin(drug_response_data.iloc[:, tumor_col], source_data.index))[0],\n :,\n ]\n\n source_data = source_data.copy()\n source_data = source_data.iloc[\n np.where(np.isin(source_data.index, drug_response_data.iloc[:, tumor_col]))[0],\n :,\n ]\n\n source_std_id = select_features_by_variation(\n 
source_data, variation_measure=\"std\", threshold=0.00000001\n )\n target_std_id = select_features_by_variation(\n target_data, variation_measure=\"std\", threshold=0.00000001\n )\n std_id = np.sort(np.intersect1d(source_std_id, target_std_id))\n source_data = source_data.iloc[:, std_id]\n target_data = target_data.copy()\n target_data = target_data.iloc[:, std_id]\n\n # Perform the first step of COXEN approach to select predictive genes. To avoid exceeding the memory limit,\n # the prediction power of genes is calculated in batches.\n batchSize = 1000\n numBatch = int(np.ceil(source_data.shape[1] / batchSize))\n prediction_power = np.empty((source_data.shape[1], 1))\n prediction_power.fill(np.nan)\n for i in range(numBatch):\n startIndex = i * batchSize\n endIndex = min((i + 1) * batchSize, source_data.shape[1])\n\n if prediction_power_measure == \"pearson\":\n cor_i = np.corrcoef(\n np.vstack(\n (\n np.transpose(\n source_data.iloc[:, startIndex:endIndex]\n .loc[drug_response_data.iloc[:, tumor_col], :]\n .values\n ),\n np.reshape(\n drug_response_data.iloc[:, drug_response_col].values,\n (1, drug_response_data.shape[0]),\n ),\n )\n )\n )\n prediction_power[startIndex:endIndex, 0] = abs(cor_i[:-1, -1])\n\n if prediction_power_measure == \"mutual_info\":\n mi = mutual_info_regression(\n X=source_data.iloc[:, startIndex:endIndex]\n .loc[drug_response_data.iloc[:, tumor_col], :]\n .values,\n y=drug_response_data.iloc[:, drug_response_col].values,\n )\n prediction_power[startIndex:endIndex, 0] = mi\n\n if multi_drug_mode:\n indices = np.argsort(-prediction_power[:, 0])\n return std_id[indices]\n\n num_predictive_gene = int(min(num_predictive_gene, source_data.shape[1]))\n gid1 = np.argsort(-prediction_power[:, 0])[:num_predictive_gene]\n\n # keep only predictive genes for source and target data\n source_data = source_data.iloc[:, gid1]\n target_data = target_data.iloc[:, gid1]\n num_generalizable_gene = int(min(num_generalizable_gene, len(gid1)))\n # perform the second step of COXEN approach to select generalizable genes among the predictive genes\n gid2 = generalization_feature_selection(\n source_data.values,\n target_data.values,\n generalization_power_measure,\n num_generalizable_gene,\n )\n\n indices = std_id[gid1[gid2]]\n\n return np.sort(indices)", "def match_gc_content(pos_one_hot, neg_one_hot, neg_pos_ratio=1):\n N, L, A = pos_one_hot.shape\n gc_pos = np.sum(np.sum(pos_one_hot[:,:,[1,2]], axis=2), axis=1)/L\n gc_neg = np.sum(np.sum(neg_one_hot[:,:,[1,2]], axis=2), axis=1)/L\n print(' Average GC content for positive sequences: %.3f'%(np.mean(gc_pos)))\n print(' Average GC content for negative sequences: %.3f'%(np.mean(gc_neg)))\n\n pos_index = np.argsort(gc_pos)\n neg_index = np.argsort(gc_neg)\n num_neg = len(neg_index)\n num_pos = len(pos_index)\n\n match_index = []\n if num_neg > num_pos:\n k = 0\n status = True\n for i in pos_index:\n for j in range(k, num_neg):\n if gc_pos[i] < gc_neg[neg_index[j]]:\n if k > num_neg:\n status = False\n break\n else:\n # print(\"%.2f vs %.2f\"%(gc_pos[i], gc_neg[neg_index[j]]))\n match_index.append(neg_index[j])\n k = j+1\n break\n if not status:\n break\n\n remainder = int(num_pos*neg_pos_ratio) - len(match_index)\n print(' Found %d GC-matched sequences.'%(len(match_index)))\n if remainder > 0:\n print(' Adding %d more random negative sequences.'%(remainder))\n remain_index = np.array(list(set(range(num_neg)) - set(match_index)))\n index = np.random.permutation(len(remain_index))[:remainder] \n # index = np.argsort(gc_neg[remain_index])[::-1]\n 
for n in remain_index[index[:remainder]]:\n match_index.append(n)\n \n match_index = np.array(match_index)\n print(' Average GC content for sub-sampled negative sequences: %.3f'%(np.mean(gc_neg[match_index])))\n\n return neg_one_hot[match_index], match_index", "def coxen_multi_drug_gene_selection(\n source_data,\n target_data,\n drug_response_data,\n drug_response_col,\n tumor_col,\n drug_col,\n prediction_power_measure=\"lm\",\n num_predictive_gene=100,\n generalization_power_measure=\"ccc\",\n num_generalizable_gene=50,\n union_of_single_drug_selection=False,\n):\n\n if isinstance(drug_response_col, str):\n drug_response_col = np.where(drug_response_data.columns == drug_response_col)[\n 0\n ][0]\n\n if isinstance(tumor_col, str):\n tumor_col = np.where(drug_response_data.columns == tumor_col)[0][0]\n\n if isinstance(drug_col, str):\n drug_col = np.where(drug_response_data.columns == drug_col)[0][0]\n\n drug_response_data = drug_response_data.copy()\n drug_response_data = drug_response_data.iloc[\n np.where(np.isin(drug_response_data.iloc[:, tumor_col], source_data.index))[0],\n :,\n ]\n drugs = np.unique(drug_response_data.iloc[:, drug_col])\n\n source_data = source_data.copy()\n source_data = source_data.iloc[\n np.where(np.isin(source_data.index, drug_response_data.iloc[:, tumor_col]))[0],\n :,\n ]\n\n source_std_id = select_features_by_variation(\n source_data, variation_measure=\"std\", threshold=0.00000001\n )\n target_std_id = select_features_by_variation(\n target_data, variation_measure=\"std\", threshold=0.00000001\n )\n std_id = np.sort(np.intersect1d(source_std_id, target_std_id))\n source_data = source_data.iloc[:, std_id]\n target_data = target_data.copy()\n target_data = target_data.iloc[:, std_id]\n\n num_predictive_gene = int(min(num_predictive_gene, source_data.shape[1]))\n\n if union_of_single_drug_selection:\n if (\n prediction_power_measure != \"pearson\"\n and prediction_power_measure != \"mutual_info\"\n ):\n print(\n \"pearson or mutual_info must be used as prediction_power_measure for taking the union of selected genes of every drugs\"\n )\n sys.exit(1)\n gid1 = np.array([]).astype(np.int64)\n for d in drugs:\n idd = np.where(drug_response_data.iloc[:, drug_col] == d)[0]\n response_d = drug_response_data.iloc[idd, :]\n gid2 = coxen_single_drug_gene_selection(\n source_data,\n target_data,\n response_d,\n drug_response_col,\n tumor_col,\n prediction_power_measure,\n num_predictive_gene,\n generalization_power_measure,\n num_generalizable_gene,\n )\n gid1 = np.union1d(gid1, gid2)\n return np.sort(std_id[gid1])\n\n if prediction_power_measure == \"lm\":\n pvalue = np.empty((source_data.shape[1], 1))\n pvalue.fill(np.nan)\n drug_m = np.identity(len(drugs))\n drug_m = pd.DataFrame(drug_m, index=drugs)\n drug_sample = drug_m.loc[drug_response_data.iloc[:, drug_col], :].values\n for i in range(source_data.shape[1]):\n ge_sample = (\n source_data.iloc[:, i].loc[drug_response_data.iloc[:, tumor_col]].values\n )\n sample = np.hstack(\n (np.reshape(ge_sample, (len(ge_sample), 1)), drug_sample)\n )\n sample = sm.add_constant(sample)\n mod = sm.OLS(drug_response_data.iloc[:, drug_response_col].values, sample)\n try:\n res = mod.fit()\n pvalue[i, 0] = res.pvalues[1]\n except ValueError:\n pvalue[i, 0] = 1\n\n gid1 = np.argsort(pvalue[:, 0])[:num_predictive_gene]\n\n elif (\n prediction_power_measure == \"pearson\"\n or prediction_power_measure == \"mutual_info\"\n ):\n gene_rank = np.empty((len(drugs), source_data.shape[1]))\n gene_rank.fill(np.nan)\n gene_rank = 
pd.DataFrame(gene_rank, index=drugs)\n for d in range(len(drugs)):\n idd = np.where(drug_response_data.iloc[:, drug_col] == drugs[d])[0]\n response_d = drug_response_data.iloc[idd, :]\n temp_rank = coxen_single_drug_gene_selection(\n source_data,\n target_data,\n response_d,\n drug_response_col,\n tumor_col,\n prediction_power_measure,\n num_predictive_gene=None,\n generalization_power_measure=None,\n num_generalizable_gene=None,\n multi_drug_mode=True,\n )\n gene_rank.iloc[d, : len(temp_rank)] = temp_rank\n for i in range(\n int(np.ceil(num_predictive_gene / len(drugs))), source_data.shape[1] + 1\n ):\n gid1 = np.unique(\n np.reshape(gene_rank.iloc[:, :i].values, (1, gene_rank.shape[0] * i))[\n 0, :\n ]\n )\n gid1 = gid1[np.where(np.invert(np.isnan(gid1)))[0]]\n if len(gid1) >= num_predictive_gene:\n break\n gid1 = gid1.astype(np.int64)\n\n # keep only predictive genes for source and target data\n source_data = source_data.iloc[:, gid1]\n target_data = target_data.iloc[:, gid1]\n num_generalizable_gene = int(min(num_generalizable_gene, len(gid1)))\n\n # perform the second step of COXEN approach to select generalizable genes among the predictive genes\n gid2 = generalization_feature_selection(\n source_data.values,\n target_data.values,\n generalization_power_measure,\n num_generalizable_gene,\n )\n\n indices = std_id[gid1[gid2]]\n\n return np.sort(indices)", "def _cmd_genemetrics(args):\n cnarr = read_cna(args.filename)\n segarr = read_cna(args.segment) if args.segment else None\n is_sample_female = verify_sample_sex(cnarr, args.sample_sex, args.male_reference, args.diploid_parx_genome)\n # TODO use the stats args\n table = do_genemetrics(\n cnarr,\n segarr,\n args.threshold,\n args.min_probes,\n args.drop_low_coverage,\n args.male_reference,\n is_sample_female,\n args.diploid_parx_genome,\n )\n logging.info(\"Found %d gene-level gains and losses\", len(table))\n write_dataframe(args.output, table)", "def cis_insertions():\n\n return [\n # 1000 bp upstream of Trp53bp2.\n Insertion(id='INS1', chromosome='1', position=182408172,\n strand=1, support=2, sample='s1',\n metadata=frozendict({'cis_id': 'CIS1'})),\n # Different chromosome.\n Insertion(id='INS2', chromosome='4', position=77843175,\n strand=1, support=2, sample='s1',\n metadata=frozendict({'cis_id': 'CIS2'}))\n ] # yapf: disable", "def lof_sig_scores(table, samples, verbose=True):\n mut_probdam = 'Missense:Probably'\n mut_syn = 'Synonymous'\n mut_trunc = ['Nonsense', 'Frameshift', 'Splice-site']\n mut_other = ['Missense:Benign', 'Missense:Possibly', 'MissenseNA', 'Indel']\n mut_all = [mut_probdam, mut_syn] + mut_trunc + mut_other\n\n # Calculate the global nonsynonymous:synonymous ratio ---------------------\n # Within each mutation category, sum counts (across all genes)\n tot_count_probdam = sum(table[mut_probdam])\n tot_count_syn = sum(table[mut_syn])\n tot_count_trunc = sum(itertools.chain(*(list(table[col])\n for col in mut_trunc)))\n tot_count_other = sum(itertools.chain(*(list(table[col])\n for col in mut_other)))\n\n # Global mutation count across all categories and genes (= 3504)\n tot_count_all = sum((tot_count_probdam, tot_count_syn, tot_count_trunc,\n tot_count_other))\n if verbose:\n print(\"Counted\", tot_count_all, \"mutations across\", len(table), \"genes\",\n \"and\", len(samples), \"samples\", file=sys.stderr)\n\n # Fraction of global mutations in each category of interest\n tot_frac_probdam = tot_count_probdam / tot_count_all\n tot_frac_syn = tot_count_syn / tot_count_all\n tot_frac_trunc = tot_count_trunc / 
tot_count_all\n\n # Global nonsynonymous:synonymous ratio = (1-syn)/syn (= 2.13697)\n tot_ns_s_ratio = (1 - tot_frac_syn) / tot_frac_syn\n\n # Calculate each gene's mutation score ------------------------------------\n for _idx, row in table.iterrows():\n gene_count_all = sum([row[col] for col in mut_all])\n if not gene_count_all:\n # Gene is not mutated at all --> zero score\n yield (row['Gene'], 0.0)\n continue\n\n # Initial score is the sum the 'Normalized' values across all samples\n raw_score = sum(row[sid] for sid in samples)\n\n # Adjust for NS:S ratio\n gene_count_syn = row[mut_syn]\n syn_factor = max(1 - tot_ns_s_ratio * gene_count_syn / gene_count_all,\n 0)\n new_score = raw_score * syn_factor\n\n # Adjust for \"probably damaging\" missense and truncating mutations\n gene_frac_probdam = row[mut_probdam] / gene_count_all\n probdam_factor = 1 + gene_frac_probdam - tot_frac_probdam\n gene_frac_trunc = sum([row[col] for col in mut_trunc]) / gene_count_all\n trunc_factor = gene_frac_trunc / tot_frac_trunc\n final_score = new_score * probdam_factor * trunc_factor\n yield (row['Gene'], final_score)", "def selection_profiles_by_chance(true, compare):\n n_neurons, M = true.shape\n probabilities = np.zeros(n_neurons)\n\n for neuron in range(n_neurons):\n n = np.count_nonzero(true[neuron])\n N = np.count_nonzero(compare[neuron])\n rv = hypergeom(M=M, n=n, N=N)\n\n overlap = np.count_nonzero(true[neuron] * compare[neuron])\n probabilities[neuron] = 1 - rv.cdf(x=overlap)\n\n return probabilities", "def test_genomic(self):\n self.c.execute(\"\"\"select expIds,expScores from genomic_test\"\"\")\n rows = self.c.fetchall()\n self.assertEqual(len(rows), 1) # one probe\n self.assertEqual(rows[0][0], '0,1,2,3,4') # ordered by sample id\n values = map(lambda x: float(x), rows[0][1].split(',')) # scores are in correct order\n self.assertTrue(values[0] - 0.479005065149792 < self.tolerance)\n self.assertTrue(values[1] - 25.1 < self.tolerance)\n self.assertTrue(values[2] - 5.3 < self.tolerance)\n self.assertTrue(values[3] - 3.1 < self.tolerance)\n self.assertTrue(values[4] - -1.23 < self.tolerance)", "def CreateGeneModels(genes_cmpt, transcripts_cmpt, exons_cmpt, utr3_cmpt, utr5_cmpt, cds_cmpt):\n gene_counter, gene_models = 1, []\n for gene_entry in genes_cmpt: ## Figure out the genes and transcripts associated feature \n if gene_entry in transcripts_cmpt:\n gene=init_gene() \n gene['id']=gene_counter\n gene['name']=gene_entry[1]\n gene['chr']=genes_cmpt[gene_entry]['chr']\n gene['source']=genes_cmpt[gene_entry]['source']\n gene['start']=genes_cmpt[gene_entry]['start']\n gene['stop']=genes_cmpt[gene_entry]['stop']\n gene['strand']=genes_cmpt[gene_entry]['strand']\n if not gene['strand'] in ['+', '-']:\n gene['strand']='.' # Strand info not known replaced with a dot symbol instead of None, ?, . 
etc.\n if len(transcripts_cmpt[gene_entry])>1:\n gene['is_alt_spliced'] = 1\n gene['is_alt'] = 1\n\t gtype=[]\n for tids in transcripts_cmpt[gene_entry]: ## transcript section related tags \n gene['transcripts'].append(tids['ID'])\n\t\tgtype.append(tids['type'])\n exon_cod, utr5_cod, utr3_cod, cds_cod = [], [], [], []\n if (gene['chr'], tids['ID']) in exons_cmpt:\n exon_cod = [[feat_exon['start'], feat_exon['stop']] for feat_exon in exons_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in utr5_cmpt:\n utr5_cod = [[feat_utr5['start'], feat_utr5['stop']] for feat_utr5 in utr5_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in utr3_cmpt:\n utr3_cod = [[feat_utr3['start'], feat_utr3['stop']] for feat_utr3 in utr3_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in cds_cmpt:\n cds_cod = [[feat_cds['start'], feat_cds['stop']] for feat_cds in cds_cmpt[(gene['chr'], tids['ID'])]]\n if len(exon_cod) == 0: ## build exon coordinates from UTR3, UTR5 and CDS\n if cds_cod != []:\n exon_cod=createExon(gene['strand'], utr5_cod, cds_cod, utr3_cod) \n\n if gene['strand']=='-': ## general order to coordinates\n if len(exon_cod) >1:\n if exon_cod[0][0] > exon_cod[-1][0]:\n exon_cod.reverse()\n if len(cds_cod) >1:\n if cds_cod[0][0] > cds_cod[-1][0]: \n cds_cod.reverse()\n if len(utr3_cod) >1:\n if utr3_cod[0][0] > utr3_cod[-1][0]: \n utr3_cod.reverse()\n if len(utr5_cod) >1:\n if utr5_cod[0][0] > utr5_cod[-1][0]:\n utr5_cod.reverse()\n\n tis, cdsStop, tss, cleave = [], [], [], [] ## speacial sited in the gene region \n if cds_cod != []:\n if gene['strand'] == '+':\n tis = [cds_cod[0][0]]\n cdsStop = [cds_cod[-1][1]-3]\n elif gene['strand'] == '-':\n tis = [cds_cod[-1][1]]\n cdsStop = [cds_cod[0][0]+3]\n if utr5_cod != []:\n if gene['strand'] == '+':\n tss = [utr5_cod[0][0]]\n elif gene['strand'] == '-':\n tss = [utr5_cod[-1][1]]\n if utr3_cod != []:\n if gene['strand'] == '+':\n cleave = [utr3_cod[-1][1]]\n elif gene['strand'] == '-':\n cleave = [utr3_cod[0][0]]\n\n cds_status, exon_status, utr_status = 0, 0, 0 ## status of the complete elements of the gene\n if cds_cod != []: ## adding phase to the CDS region \n cds_cod_phase = addCDSphase(gene['strand'], cds_cod)\n cds_status = 1\n gene['cds_exons'].append(cds_cod_phase)\n\n if exon_cod != []: \n exon_status = 1\n if utr5_cod != [] or utr3_cod != []: \n utr_status = 1\n if cds_status != 0 and exon_status != 0 and utr_status != 0:\n gene['transcript_status'].append(1)\n else:\n gene['transcript_status'].append(0)\n\n if exon_cod: ## final check point for a valid gene model \n gene['exons'].append(exon_cod)\n gene['utr3_exons'].append(utr3_cod)\n gene['utr5_exons'].append(utr5_cod)\n gene['tis'].append(tis)\n gene['cdsStop'].append(cdsStop)\n gene['tss'].append(tss)\n gene['cleave'].append(cleave) \n\t \n\t gtype=list(set(gtype)) ## different types \n gene['gene_info']=dict(ID=gene_entry[1],\n\t\t\t\tSource=genes_cmpt[gene_entry]['source'],\n\t\t\t\tType=gtype)\n gene=FeatureValueFormat(gene) ## get prepare for MAT writing \n gene_counter+=1\n gene_models.append(gene)\n return gene_models", "def gc_content(sequence):\n gc = sequence.count('G') + sequence.count('C')\n atgc = sequence.count('A') + sequence.count('T') + sequence.count('G') + sequence.count('C')\n \n return (gc/atgc) * 100", "def feature_selection_information_gain(df, string_cols, threshold = 0.01, label_col = 'label', pcg = 1.0):\n\n df = df.select(string_cols + [label_col]).sample(withReplacement=False, fraction=pcg)\n\n df = 
only_categorical_columns(df, label_col=label_col)\n\n df.cache()\n\n print \"[Info] Number of rows in the DF: \" + str(df.count())\n\n string_cols = list(set(df.columns) - set([label_col]))\n\n # First pipeline: string indexer variables -> necessary to use them in models\n print('[INFO] Indexing categorical variables: ' + str(len(string_cols)))\n\n ig_df = information_gain(df=df, var_list=string_cols, label_col = label_col)\n\n cat_cols = ig_df\\\n .filter(col('ig') >= (threshold)*col('init_entropy'))\\\n .select('feature').rdd.map(lambda r: r['feature']).collect()\n\n # [ig[0] for ig in ig_results if (ig[1] >= threshold_abs)]\n\n return cat_cols", "def genes_feature_selection(methyl_data, cancer_genes):\n\n overlap_genes = cancer_genes.intersection(methyl_data.index)\n\n return methyl_data.ix[overlap_genes]", "def get_gene_sets(table, dominant):\n \n known = table[table[\"hgnc\"].isin(dominant)]\n gwide = set(known[\"hgnc\"][known[\"genomewide\"]])\n sugg = set(known[\"hgnc\"][known[\"suggestive\"]])\n \n gene_sets = {\"genomewide\": gwide, \"suggestive\": sugg}\n \n return gene_sets", "def test_simulated_gene_data(self):\n np.random.seed(0)\n\n sim_mat, cell_type, sim_de = simulate_matrix()\n\n # get scale\n scale = np.array(sim_mat.sum(axis=0)).squeeze()\n depth = (scale + 1) / np.median(scale)\n cov = [np.log(depth)]\n\n # precompute distribution params\n ntfmatrix = normalize_matrix(sim_mat, scale)\n alpha = atac_de.empirical_dispersion(ntfmatrix)\n\n # sseq_params = cr_de.compute_sseq_params(sim_mat)\n # alpha = sseq_params['phi_g']\n\n de_res = atac_de.NBGLM_differential_expression(sim_mat, np.flatnonzero(cell_type == 0), np.flatnonzero(cell_type == 1),\n model='nb', test_params={'cov': cov, 'alpha': alpha},\n verbose=False)\n\n sensitivity, ppv = evaluate_de_res(de_res, sim_de)\n\n assert sensitivity >= 0.94\n assert ppv >= 0.94", "def anno_gene_stats(anno_gene, loc_file, gene_file, isConvert):\r\n LocationNum = collections.Counter()\r\n LocationGene = collections.defaultdict(list)\r\n\r\n\r\n GeneCatSample = collections.defaultdict(lambda: collections.defaultdict(list))\r\n CatGeneSample = collections.defaultdict(lambda: collections.defaultdict(list))\r\n\r\n allLocations = set()\r\n anno_h = open(anno_gene, \"r\")\r\n for line in anno_h:\r\n lines = line.strip().split(\"\\t\")\r\n sample, location, number, gene = lines[:4]\r\n number = int(number)\r\n\r\n ### whether convert the category to \"Exon\" or \"Intron\"\r\n if isConvert == \"True\":\r\n if location == \"Intron\":\r\n newLoc = \"Intron\"\r\n else:\r\n newLoc = \"Exon\"\r\n elif isConvert == \"False\":\r\n newLoc = location\r\n else:\r\n print(\"Please check whether convert the original category to 'Intron' or 'Exon' based on True of False.\")\r\n sys.exit(1)\r\n\r\n allLocations.add(newLoc)\r\n ### get the dict of gene -> location -> sample\r\n genes = gene.split(\",\")\r\n for g in genes:\r\n GeneCatSample[g][newLoc].append(sample)\r\n\r\n ### get the location -> gene -> sample\r\n CatGeneSample[newLoc][g].append(sample)\r\n anno_h.close()\r\n\r\n\r\n ## output gene and number in samples\r\n ### sort all locations\r\n sortedAllLocation = sorted(list(allLocations))\r\n\r\n gene_h = open(gene_file, \"w\")\r\n\r\n headerSample = [l + \"_samples\" for l in sortedAllLocation]\r\n gene_h.write(\"Gene\\tTotal\\t%s\\t%s\\n\" % (\"\\t\".join(sortedAllLocation), \"\\t\".join(headerSample)))\r\n\r\n GeneRecord = {}\r\n GeneNumber = {}\r\n\r\n allGenes = sorted(list(GeneCatSample.keys()))\r\n for ge in allGenes:\r\n ### get the 
number and samples for each location of each gene\r\n GeneNum = []\r\n GeneSample = []\r\n\r\n for loc in sortedAllLocation:\r\n if loc in GeneCatSample[ge]:\r\n samples = GeneCatSample[ge][loc]\r\n ##############################\r\n ####### unique for samples\r\n samples = sorted(list(set(samples)))\r\n sampleNum = len(samples)\r\n else:\r\n sampleNum = 0\r\n samples = [\"-\"]\r\n\r\n GeneNum.append(sampleNum)\r\n GeneSample.append(samples)\r\n\r\n GeneNumSum = sum(GeneNum)\r\n CatNumOut = \"\\t\".join([str(g) for g in GeneNum])\r\n CatSampleOut = \"\\t\".join([\",\".join(s) for s in GeneSample])\r\n\r\n record = \"%s\\t%d\\t%s\\t%s\\t\" % (ge, GeneNumSum, CatNumOut, CatSampleOut)\r\n GeneNumber[ge] = GeneNumSum\r\n GeneRecord[ge] = record\r\n \r\n ### output\r\n GeneNumSorted = sort_dict_value(GeneNumber)\r\n for g, n in GeneNumSorted:\r\n r = GeneRecord[g]\r\n gene_h.write(\"%s\\n\" % r)\r\n\r\n gene_h.close() \r\n\r\n\r\n ### location and genes\r\n loc_h = open(loc_file, \"w\")\r\n loc_h.write(\"Location\\tGeneNumber\\tGenes\\tSampleNumber\\tSamples\\n\")\r\n for loc in sortedAllLocation:\r\n geneSample = CatGeneSample[loc]\r\n genes = sorted(list(geneSample.keys()))\r\n geneNum = len(genes)\r\n samNum = 0\r\n samList = []\r\n for ge in geneSample:\r\n sam = geneSample[ge]\r\n samList.append(sam)\r\n samNum += len(sam)\r\n samOut = \";\".join([\",\".join(s) for s in samList])\r\n loc_h.write(\"%s\\t%d\\t%s\\t%d\\t%s\\n\" % (loc, geneNum, \",\".join(genes), samNum, samOut))\r\n loc_h.close()" ]
[ "0.62168896", "0.5950708", "0.5604085", "0.5591386", "0.5474958", "0.54715043", "0.5465627", "0.5456833", "0.535208", "0.5323605", "0.5284358", "0.52755475", "0.5262531", "0.52539337", "0.52331626", "0.520046", "0.5148381", "0.507968", "0.5076866", "0.5065854", "0.5053424", "0.5049818", "0.5047182", "0.50386256", "0.50204575", "0.50156444", "0.50059795", "0.49790072", "0.49786723", "0.49635655" ]
0.6928965
0
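
For context on the CTG record above: per gene, the test amounts to a one-sided Poisson comparison of the observed insertion count in a gene window against the count expected from the background rate. Below is a minimal sketch of that core test using scipy; it assumes the expectation is the genome-wide insertion total scaled by the window's share of pattern sites, and every number is a made-up placeholder, not a value taken from the record:

```python
# Minimal sketch of the per-gene Poisson enrichment test described above.
# All counts are hypothetical placeholders.
from scipy.stats import poisson

total_insertions = 500      # insertions observed genome-wide
total_sites = 2_000_000     # pattern occurrences genome-wide (background)
gene_sites = 4_000          # pattern occurrences inside one gene window
gene_insertions = 9         # insertions observed inside that window

# Expected insertions in the window under a uniform background rate.
expected = total_insertions * (gene_sites / total_sites)  # 1.0 here

# P(X >= observed) for X ~ Poisson(expected); sf(k - 1) gives P(X >= k).
p_value = poisson.sf(gene_insertions - 1, expected)
print(f"expected={expected:.2f}, p={p_value:.3e}")
```

The document's own code then applies a Bonferroni-style correction (p-value times the number of genes, clipped at 1) to produce the reported q-values.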
Subsets insertions for given gene windows.
def _subset_to_windows(
        insertions,  # type: List[Insertion]
        gene_windows  # type: Dict[str, Tuple[str, int, int]]
):  # type: (...) -> List[Insertion]
    # Create lookup trees.
    trees = {
        chrom: IntervalTree.from_tuples((i[1:]) for i in chrom_int)
        for chrom, chrom_int in itertools.groupby(
            sorted(gene_windows.values()), operator.itemgetter(0))
    }

    # Determine which insertions overlap tree intervals and
    # correspond to genes with known gene window.
    def _in_windows(ins, trees):
        try:
            return trees[ins.seqname].overlaps(ins.position)
        except KeyError:
            return False

    return [
        ins for ins in insertions
        if ins.metadata['gene_id'] in gene_windows and _in_windows(ins, trees)
    ]
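
As a rough illustration of the interval-tree lookup performed by `_subset_to_windows` above, here is a minimal sketch built on the `intervaltree` package; the gene windows and insertion positions are made-up examples, and the real code additionally keys the windows by gene id and checks each insertion's `gene_id` metadata:

```python
# Minimal sketch of subsetting positions to gene windows via interval trees.
# Windows and insertion positions are hypothetical examples.
from intervaltree import IntervalTree

gene_windows = {"GeneA": ("1", 1000, 2000), "GeneB": ("2", 5000, 6000)}

# One tree per chromosome, built from the (start, end) part of each window.
trees = {}
for chrom, start, end in gene_windows.values():
    trees.setdefault(chrom, IntervalTree()).addi(start, end)

insertions = [("1", 1500), ("1", 3000), ("2", 5500)]  # (chromosome, position)

kept = [ins for ins in insertions
        if ins[0] in trees and trees[ins[0]].overlaps(ins[1])]
print(kept)  # [('1', 1500), ('2', 5500)]
```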
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_windows(sliding_windows_file, genes_file, output_file):\n\n\t# Read sliding windows file and create a list in the form\n\t# genes = [('gene1', 1000, 2000), ('gene2', 4000, 45000)]\n\tgenes = []\t\t# this could be a dictionary but I prefer not\n\tfor line in genes_file:\n\t\tline = line.strip()\n\n\t\tif line and not line.startswith('#'):\t\t# if line is not empty and not a comment\n#\t\tif line and re.match('\\d+', line):\n\t\t\tlogging.debug((\"line: %s\" %line))\n\t\t\tfields = line.split()\t\t# it is better to use the default splitting algorithm here.\n\t\t\t\t\t\t\t\t\t\t# read help(''.split)\t\n\n\t\t\tgene_name = fields[0]\n\t\t\tlogging.debug((\"fields: %s\" %fields))\n\t\t\tstart = int(fields[2])\n\t\t\tend = int(fields[3].strip())\t\t# remove \\n\\r, like chomp\n\t\t\tgenes.append((gene_name, start, end))\n\t\t\t\n#\tlogging.debug((\"genes :\", genes))\t\t# print the contents of genes, if level=loggin.DEBUG\n\n\t# read sliding windows file, and select windows that fall in genes\n\toutput = '#gene_name, gene_start, gene_end, window_start, window_middle, window_end, population, number, score\\n'\n\toutputlineskeleton = \"%s\\t%d\\t%d\\t%d\\t%d\\t%d\\t%s\\t%s\\t%s\\n\"\t# %(gene_name, gene_start, gene_end, window_start, window_middle, window_end, population, number, score)\n\n\tfor line in sliding_windows_file:\n\t\tline = line.strip()\t\t# remove trailing characters (like chomp)\n\t\tif line and not line.startswith('#'):\n\t\t\twindow_fields = line.split()\n\n#\t\t\tlogging.debug(window_fields)\n\t\t\twindow_start = int(window_fields[0])\n\t\t\twindow_middle = int(window_fields[2])\n\t\t\twindow_end = int(window_fields[1])\n#\t\t\tgene = window_fields[3]\n\t\t\tpopulation = window_fields[4]\n\t\t\tnumber = window_fields[5]\n\t\t\tscore = window_fields[6]\n\n\t\t\tfor gene in genes:\n\t\t\t\tgene_start = int(gene[1])\n\t\t\t\tgene_end = int(gene[2])\n\t\t\t\tgene_name = gene[0]\n\t\t\t\t# if window_start is comprised between gene_end and gene_start\n\t\t\t\tif gene_end > window_start >= gene_start:\n\t\t\t\t\tlogging.debug(\"This window starts inside gene %s (%s, %s)\" %(gene[0], gene_start, gene_end))\n\t\t\t\t\tlogging.debug(line)\n\t\t\t\t\toutput += outputlineskeleton % (gene_name, gene_start, gene_end, window_start, window_middle, window_end, population, number, score)\n\t\t\t\telif gene_end >= window_end > gene_start:\n\t\t\t\t\tlogging.debug(\"This window ends inside gene %s (%s, %s)\" %(gene[0], gene_start, gene_end))\n\t\t\t\t\tlogging.debug(line)\n\t\t\t\t\toutput += outputlineskeleton % (gene_name, gene_start, gene_end, window_start, window_middle, window_end, population, number, score)\n\t\n\tlogging.debug(output)\n\toutput_file.write(output)\n\toutput_file.seek(0)\n\treturn output_file", "def select_windows(start, stop, num_windows,\n window_width=1, window_units=\"D\",\n sampling=1, sampling_units=\"T\",\n no_overlaps=True, verbose=True):\n\n # Create all sample candidates\n dt_range = pd.date_range(start, stop-pd.Timedelta(window_width),\n freq=\"%i%s\" % (sampling, sampling_units))\n\n # Sample candidate windows\n selected_windows = np.random.choice(dt_range, num_windows, replace=False)\n selected_windows = pd.DataFrame(selected_windows, columns=[\"start\"])\n\n # Calculate window end\n end_delta = (pd.Timedelta(window_width, unit=window_units)\n - pd.Timedelta(sampling,\n unit=\"m\" if sampling_units==\"T\" else sampling_units))\n selected_windows[\"end\"] = (selected_windows[\"start\"] + end_delta)\n\n # Filter overlaps\n if not no_overlaps:\n return 
selected_windows\n else:\n selected_windows = filter_overlaps(selected_windows,\n pd.Timedelta(window_width,\n unit=window_units))\n\n while selected_windows.shape[0]<num_windows:\n if verbose:\n print(\"Got %i windows...\" % selected_windows.shape[0])\n\n selected_windows = pd.concat([selected_windows,\n select_windows(start, stop, num_windows,\n window_width, window_units,\n sampling, sampling_units,\n no_overlaps=False)],\n ignore_index=True)\n selected_windows = filter_overlaps(selected_windows,\n pd.Timedelta(window_width,\n unit=window_units))\n return selected_windows.iloc[:num_windows]", "def create_subsets(self, start_ids):\n subsets = list()\n df = self.all_df.copy()\n for sid in start_ids:\n df2 = df.loc[sid:, :]\n subsets.append(df.drop(df2.index, axis=0))\n df = df2.copy()\n subsets.append(df)\n return subsets", "def main():\n\tsliding_windows_file_path = ''\n\tgenes_file_path = ''\n\toutput_file_path = ''\n\n\t# Read arguments and parameters\n\ttry:\n\t\topts, args = getopt.getopt(sys.argv[1:], \"g:w:o:ht\", [\"genes=\", \"window=\", \"output=\", \"help\", \"test\"])\n\n\texcept getopt.GetoptError, err:\n\t\tusage()\n\t\tsys.exit(2)\n\t\n\tif opts == []:\n\t\tusage()\n\t\tsys.exit()\n\n\tfor opt, arg in opts:\n\t\tif opt in ('-h', '--help'):\n\t\t\tusage()\n\t\t\tsys.exit()\n\t\telif opt in ('--genes', '-g'):\n\t\t\tgenes_file_path = arg\n\t\telif opt in ('--window', '-w'):\n\t\t\tsliding_windows_file_path = arg\n\t\telif opt in ('--output', '-o'):\n\t\t\toutput_file_path = arg\n\t\telif opt in ('--test', '-t'):\n\t\t\t_test()\n\t\t\tsys.exit()\n\n\t# default values\n\tif not sliding_windows_file_path:\n\t\tprint \"using default parameters windows file!\"\n\t\tsliding_windows_file_path = '../data/Resultats_lower_daf.txt'\n\telif not genes_file_path:\n\t\tprint \"using default genes file!\"\n\t\tgenes_file_path = '../data/Genes.txt'\n\telif not output_file_path:\n\t\tprint \"using default output file!\"\n\t\toutput_file_path = '../results/filtered_windows.txt'\n\n\tsliding_windows_file = file(sliding_windows_file_path, 'r')\n\tgenes_file = file(genes_file_path, 'r')\n\toutput_file = file(output_file_path, 'w')\n\n\tfilter_windows(sliding_windows_file, genes_file, output_file)", "def genChunkTestSets(data, nSets, ws, gapSize, dirName=\"test_data/\", ofCut=0.9):\n # Start times for windows with at least ofCut of data observed\n tOFCut = np.where(windowObsFrac(data, ws) > ofCut)[0]\n\n # Choose times for test intervals\n np.random.seed(np.random.randint(0, 100))\n sampleTs = np.random.choice(tOFCut, size=nSets, replace=False)\n\n for ti in sampleTs:\n # Randomly select a sensor\n sensor = np.random.randint(0, data.shape[1])\n # Remove some data to use for testing\n _, removedTimes = removeChunk(data, ti, ws, sensor, gapSize)\n\n # Save data in csvs\n np.savetxt(dirName + \"/ti=%i_tf=%i_sensor=%i.csv\"%(ti, ti+ws, sensor), removedTimes, \\\n delimiter=\" \", fmt=\"%i\")", "def create_subset_list(self):\n\n row = 0\n for time_step in self.min_increments:\n subset = SubsetClass(time_step=time_step, query_df=self.query_df, model_df=self.model_df, row=row)\n self.subset_list.append(subset)\n row += 1", "def subsets(self):\n \n # note subsets have an unusual encoding\n query = \"\"\"\n prefix oboInOwl: <http://www.geneontology.org/formats/oboInOwl#>\n SELECT DISTINCT ?s WHERE {{\n GRAPH <{g}> {{\n ?c oboInOwl:inSubset ?s \n }}\n }}\n \"\"\".format(g=self.graph_name)\n bindings = run_sparql(query)\n return [r['s']['value'] for r in bindings]", "def test_ctgs(\n insertions, # type: 
List[Insertion]\n reference, # type: Reference\n gene_ids=None, # type: Set[str]\n chromosomes=None, # type: Set[str]\n pattern=None, # type: str\n per_sample=True, # type: bool\n window=None #type: Tuple[int, int]\n):\n\n # Default to shared chromosome sequences (typically drops some\n # of the more esoteric extra scaffold/patch sequences).\n if chromosomes is None:\n reference_seq = pyfaidx.Fasta(str(reference.fasta_path))\n reference_gtf = GtfIterator(reference.indexed_gtf_path)\n\n chromosomes = list(\n set(reference_seq.keys()) & set(reference_gtf.contigs))\n\n if len(chromosomes) == 0:\n ValueError('No chromosomes are shared between the reference '\n 'sequence and reference gtf files')\n\n if len(chromosomes) == 0:\n raise ValueError('At least one chromosome must be given')\n\n # Determine gene windows using GTF.\n logging.info('Generating gene windows')\n gene_windows = _build_gene_windows(\n reference.indexed_gtf_path, window=window, chromosomes=chromosomes)\n\n # Subset insertions to gene intervals.\n insertions = _subset_to_windows(insertions, gene_windows)\n\n if gene_ids is None:\n gene_ids = set(ins.metadata['gene_id'] for ins in insertions)\n\n # Collapse insertions per gene/sample (recommended).\n # Corrects for hopping/multiple detection issues.\n if per_sample:\n logging.info('Collapsing insertions')\n insertions = list(_collapse_per_sample(insertions))\n\n # Calculate total number of pattern occurrences within intervals.\n logging.info('Counting pattern occurrences')\n reference_seq = pyfaidx.Fasta(str(reference.fasta_path))\n\n total = count_total(\n reference_seq, pattern=pattern, intervals=gene_windows.values())\n\n # Calculate p-values for each gene.\n logging.info('Calculating significance for genes')\n insertion_trees = GenomicIntervalTree.from_objects_position(\n insertions, chrom_attr='seqname')\n\n p_values = {\n gene_id: test_region(\n insertions=insertions,\n reference_seq=reference_seq,\n region=gene_windows[gene_id],\n total=total,\n pattern=pattern,\n filters=[lambda ins, gid=gene_id: ins.metadata['gene_id'] == gid],\n insertion_trees=insertion_trees)\n for gene_id in gene_ids\n }\n\n # Build result frame.\n result = pd.DataFrame.from_records(\n iter(p_values.items()), columns=['gene_id', 'p_value'])\n\n # Calculate corrected p-value using bonferroni correction.\n result['q_value'] = (result['p_value'] * len(result)).clip_upper(1.0)\n\n # Sort by q-value and p-value.\n result.sort_values(by=['q_value', 'p_value'], inplace=True)\n\n if len(insertions) > 0:\n # Annotate with gene_name if possible.\n if 'gene_name' in insertions[0].metadata:\n name_map = {\n ins.metadata['gene_id']: ins.metadata['gene_name']\n for ins in insertions\n }\n result.insert(1, 'gene_name', result['gene_id'].map(name_map))\n else:\n result['gene_name'] = np.nan\n\n # Annotate with frequency.\n frequency = (Insertion.to_frame(insertions)\n .groupby('gene_id')['sample'].nunique()\n .reset_index(name='n_samples'))\n result = pd.merge(result, frequency, on='gene_id', how='left')\n else:\n result['gene_name'] = np.nan\n result['n_samples'] = np.nan\n\n return result", "def build_subsets(self):\n self.all = nrn.SectionList()\n self.all.wholetree(sec=self.soma)", "def split():\n flag = 0\n for chromosome in region:\n for inf in region[chromosome]:\n if flag == 0:\n if chromosome not in test_set:\n test_set[chromosome] = [inf]\n else:\n test_set[chromosome].append(inf)\n else:\n if chromosome not in train_set:\n train_set[chromosome] = [inf]\n else:\n train_set[chromosome].append(inf)\n\n 
flag += 1\n flag %= 10", "def newGeneration(self):\n for i in range(0, len(self.population)):\n [ind1, ind2] = self.randomSelection()\n child = self.crossover(ind1, ind2)\n self.population[i].setGene(child)\n self.mutation(self.population[i])", "def create_subsets(subset_name, num_bg_ratings):\n bgplus_df = bgplus_table.get_full_df()\n details_df = details_table.get_full_df()\n users_df = users_table.get_full_df()\n ratings_df = ratings_table.get_full_df()\n ratings_df = ratings_df[ratings_df[\"bg_id\"].isin(details_df.iloc[:num_bg_ratings].index)]\n\n dump_dfs_to_trepos(subset_name, [(bgplus_df, bgplus_table), (details_df, details_table), (users_df, users_table), (ratings_df, ratings_table)])", "def split(self, X: tp.ArrayLike, n: tp.Optional[int] = None, min_len: int = 1, **kwargs) -> RangesT:\n X = to_any_array(X)\n if isinstance(X, (pd.Series, pd.DataFrame)):\n index = X.index\n else:\n index = pd.Index(np.arange(X.shape[0]))\n\n # Resolve start_idxs and end_idxs\n start_idxs = np.full(len(index), 0)\n end_idxs = np.arange(len(index))\n\n # Filter out short ranges\n window_lens = end_idxs - start_idxs + 1\n min_len_mask = window_lens >= min_len\n if not np.any(min_len_mask):\n raise ValueError(f\"There are no ranges that meet window_len>={min_len}\")\n start_idxs = start_idxs[min_len_mask]\n end_idxs = end_idxs[min_len_mask]\n\n # Evenly select n ranges\n if n is not None:\n if n > len(start_idxs):\n raise ValueError(f\"n cannot be bigger than the maximum number of windows {len(start_idxs)}\")\n idxs = np.round(np.linspace(0, len(start_idxs) - 1, n)).astype(int)\n start_idxs = start_idxs[idxs]\n end_idxs = end_idxs[idxs]\n\n return split_ranges_into_sets(start_idxs, end_idxs, **kwargs)", "def __create_windows(self, dat_in, dat_out, sequential):\n print(\"Creating windows...\")\n num_pkts = dat_in.shape[0]\n num_wins = math.ceil(num_pkts / self.win)\n fets = [(name, typ) for name, typ in dat_in.dtype.descr if name != \"\"]\n # Select random intervals from this simulation to create the\n # new input data. Do not pick indices between 0 and self.win\n # to make sure that all windows ending on the chosen index fit\n # within the simulation.\n pkt_idxs = random.choices(range(self.win, num_pkts), k=num_wins)\n # The new data format consists of self.win copies of the\n # existing input features. All copies of a particular feature\n # share the same scaling group.\n scl_grps, dtype = zip(\n *[(scl_grp, (f\"{name}_{idx}\", typ))\n for idx in range(self.win)\n for scl_grp, (name, typ) in enumerate(fets)])\n scl_grps = np.array(scl_grps)\n dat_in_new = np.zeros((num_wins,), dtype=list(dtype))\n\n for win_idx, end_idx in enumerate(pkt_idxs):\n # This could be done on a single line with a range select\n # and a generator, but this version is preferable because\n # it removes intermediate data copies and guarantees that\n # the resulting row is properly ordered.\n for fet_idx, pkt_idx in enumerate(\n range(end_idx - self.win + 1, end_idx + 1)):\n for name, _ in fets:\n dat_in_new[f\"{name}_{fet_idx}\"][win_idx] = (\n dat_in[pkt_idx][name])\n\n # Verify that we selected at least as many windows as we intended to.\n num_selected_wins = len(dat_in_new)\n assert num_selected_wins >= num_wins, \\\n f\"Insufficient windows: {num_selected_wins} < {num_wins}\"\n\n # As an output feature, select only the final ground truth\n # value. 
I.e., the final ground truth value for this window\n # becomes the ground truth for the entire window.\n return dat_in_new, np.take(dat_out, pkt_idxs), scl_grps", "def split(self, X: tp.ArrayLike, n: tp.Optional[int] = None, window_len: tp.Optional[float] = None,\n min_len: int = 1, **kwargs) -> RangesT:\n X = to_any_array(X)\n if isinstance(X, (pd.Series, pd.DataFrame)):\n index = X.index\n else:\n index = pd.Index(np.arange(X.shape[0]))\n\n # Resolve start_idxs and end_idxs\n if window_len is None and n is None:\n raise ValueError(\"At least n or window_len must be set\")\n if window_len is None:\n window_len = len(index) // n\n if 0 < window_len < 1:\n window_len = math.floor(window_len * len(index))\n start_idxs = np.arange(len(index) - window_len + 1)\n end_idxs = np.arange(window_len - 1, len(index))\n\n # Filter out short ranges\n window_lens = end_idxs - start_idxs + 1\n min_len_mask = window_lens >= min_len\n if not np.any(min_len_mask):\n raise ValueError(f\"There are no ranges that meet window_len>={min_len}\")\n start_idxs = start_idxs[min_len_mask]\n end_idxs = end_idxs[min_len_mask]\n\n # Evenly select n ranges\n if n is not None:\n if n > len(start_idxs):\n raise ValueError(f\"n cannot be bigger than the maximum number of windows {len(start_idxs)}\")\n idxs = np.round(np.linspace(0, len(start_idxs) - 1, n)).astype(int)\n start_idxs = start_idxs[idxs]\n end_idxs = end_idxs[idxs]\n\n return split_ranges_into_sets(start_idxs, end_idxs, **kwargs)", "def create_sets():\n global train_x, train_y, val_x, val_y\n\n print('Creating sets')\n\n dataframe = pd.read_csv('LoggerBot.log', names=NAMES).sample(frac=1)\n inputs = dataframe.values[:,:-1].astype(np.float32)\n outputs = dataframe.values[:,-1].astype(np.int32)\n\n train_set_size = int(len(dataframe) * 0.7)\n train_x, train_y = inputs[:train_set_size], outputs[:train_set_size]\n val_x, val_y = inputs[train_set_size:], outputs[train_set_size:]", "def region_sets(self,listA,listB):\n self.setA = GenomicRegionSet('for Unit Test')\n for i in range(len(listA)):\n self.setA.add(GenomicRegion(chrom=listA[i][0], initial=listA[i][1], final=listA[i][2]))\n \n self.setB = GenomicRegionSet('for Unit Test')\n for i in range(len(listB)):\n self.setB.add(GenomicRegion(chrom=listB[i][0], initial=listB[i][1], final=listB[i][2]))", "def generate_superset(self, number):\n \n superset = []\n for i in range(0, 2**(self.dim)):\n if (number & i)==number:\n superset.append(i)\n return superset", "def geneExonicRegions(self, df):\n scaffold = df.iloc[0].scaffold\n strand = df.iloc[0].strand\n gene_type = df.iloc[0].gene_type\n gene_id = df.iloc[0].gene_id\n gene_name = df.iloc[0].gene_name\n start = df.start.min()\n end = df.end.max()\n bp = [False] * (end - start + 1)\n for i in range(df.shape[0]):\n s = df.iloc[i]['start'] - start\n e = df.iloc[i]['end'] - start + 1\n bp[s:e] = [True] * (e - s)\n regions = list(range(start, end + 1))\n groups = []\n\n for i, j in groupby(bp):\n groups.append((i, len(list(j))))\n e_start = 0\n\n for i in groups:\n e_end = e_start + i[1]\n if i[0]:\n record = Record(scaffold=scaffold, start=regions[e_start],\n end=regions[e_end - 1], gene_type=gene_type, gene_id=gene_id,\n gene_name=gene_name, strand=strand)\n yield record\n e_start += i[1]", "def _chunk_windows(windows, num_chunks):\n if num_chunks <= 0 or int(num_chunks) != num_chunks:\n raise ValueError(\"Number of chunks must be an integer > 0\")\n num_chunks = min(len(windows) - 1, num_chunks)\n splits = np.array_split(windows[:-1], num_chunks)\n chunks = []\n for j 
in range(num_chunks - 1):\n chunk = np.append(splits[j], splits[j + 1][0])\n chunks.append(chunk)\n chunk = np.append(splits[-1], windows[-1])\n chunks.append(chunk)\n return chunks", "def samples_set(self):\n self.get_samples_set(self.samples_db)\n self.choose_samples(self.chosen_samples_db, self.chosen_hashes)", "def windows(self,windowSize):\n for i in range(0,len(self)-windowSize):\n yield (i,i+windowSize)", "def add_genesets(snp_dict,gene_file):\n inf = open(gene_file,\"r\")\n for i in snp_dict.keys():\n snp_dict[i]['genes']=np.empty(len(snp_dict[i]['bps']), dtype=set)\n for line in inf:\n if re.match(\"\\#\",line):\n continue\n line.rstrip()\n fields=line.split()\n if len(fields) < 3:\n continue\n bps=int(fields[1])\n if fields[0] in snp_dict.keys():\n idx = snp_dict[fields[0]]['bps'].searchsorted(bps)\n if (idx < len(snp_dict[fields[0]]['bps'])) and snp_dict[fields[0]]['bps'][idx] == bps:\n snp_dict[fields[0]]['genes'][idx]=set([ x for x in fields[2:] ])\n return True", "def subsets(self):\n return set(self.subset_map.values())", "def split_ranges_into_sets(start_idxs: tp.ArrayLike, end_idxs: tp.ArrayLike,\n set_lens: tp.MaybeSequence[tp.Sequence[float]] = (),\n left_to_right: tp.MaybeSequence[bool] = True) -> RangesT:\n start_idxs = np.asarray(start_idxs)\n end_idxs = np.asarray(end_idxs)\n checks.assert_len_equal(start_idxs, end_idxs)\n\n for i in range(len(start_idxs)):\n start_idx = start_idxs[i]\n end_idx = end_idxs[i]\n\n range_len = end_idx - start_idx + 1\n new_set_lens = []\n if len(set_lens) == 0:\n yield (np.arange(start_idx, end_idx + 1),)\n else:\n if checks.is_sequence(set_lens[0]):\n _set_lens = set_lens[i]\n else:\n _set_lens = set_lens\n if checks.is_sequence(left_to_right):\n _left_to_right = left_to_right[i]\n else:\n _left_to_right = left_to_right\n for j, set_len in enumerate(_set_lens):\n if 0 < set_len < 1:\n set_len = math.floor(set_len * range_len)\n if set_len == 0:\n raise ValueError(f\"Set {j} in the range {i} is empty\")\n new_set_lens.append(set_len)\n if sum(new_set_lens) < range_len:\n if _left_to_right:\n new_set_lens = new_set_lens + [range_len - sum(new_set_lens)]\n else:\n new_set_lens = [range_len - sum(new_set_lens)] + new_set_lens\n else:\n raise ValueError(f\"Range of length {range_len} too short to split into {len(_set_lens) + 1} sets\")\n\n # Split each range into sets\n idx_offset = 0\n set_ranges = []\n for set_len in new_set_lens:\n new_idx_offset = idx_offset + set_len\n set_ranges.append(np.arange(start_idx + idx_offset, start_idx + new_idx_offset))\n idx_offset = new_idx_offset\n\n yield tuple(set_ranges)", "def windows(self, windows):\n\n self._windows = windows", "def addSubsetProteins(self, proteinIds):\n self._addProteins(proteinIds, ['subset', 'proteins'])", "def test_build_genomic_regions(self):\n\n CDS = pybedtools.BedTool(\"\"\"chr1\\t7700\\t7900\\tfoo\\t0\\t+\\n\n chr1\\t7999\\t8500\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n UTR5 = pybedtools.BedTool(\"\"\"chr1\\t7499\\t7700\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n UTR3 = pybedtools.BedTool(\"\"\"chr1\\t8500\\t9000\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n proxintron = pybedtools.BedTool(\"\"\"chr1\\t100\\t300\\tfoo\\t0\\t+\\n\n chr1\\t798\\t998\\tfoo\\t0\\t+\\n\n chr1\\t2000\\t2200\\tfoo\\t0\\t+\\n\n chr1\\t2798\\t2998\\tfoo\\t0\\t+\\n\n chr1\\t6000\\t6200\\tfoo\\t0\\t+\\n\n chr1\\t6798\\t6998\\tfoo\\t0\\t+\\n\n chr1\\t7900\\t7998\\tfoo\\t0\\t+\\n\"\"\", from_string = True\n )\n distintron = pybedtools.BedTool(\"\"\"chr1\\t301\\t797\\tfoo\\t0\\t+\\n\n 
chr1\\t2201\\t2797\\tfoo\\t0\\t+\\n\n chr1\\t6201\\t6797\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n \n regions = build_genomic_regions(pybedtools.BedTool(clipper.test_file(\"test.gtf\")), prox_distance=200) \n \n #print UTR3\n\n #print regions['UTR3']\n print proxintron\n print regions['proxintron']\n #print regions['distintron']\n \n self.assertEqual(len(CDS.intersect(regions['CDS'], f= 1.0, r = True)), 2)\n self.assertEqual(len(UTR5.intersect(regions['UTR5'], f= 1.0, r = True)), 1)\n self.assertEqual(len(UTR3.intersect(regions['UTR3'], f= 1.0, r = True)), 1)\n self.assertEqual(len(proxintron.intersect(regions['proxintron'], f= 1.0, r = True)), 7)\n self.assertEqual(len(distintron.intersect(regions['distintron'], f= 1.0, r = True)), 3)", "def _construct_windows(self, Nw, ti, i0=0, i1=None):\n if i1 is None:\n i1 = Nw\n\n # get data for windowing period\n df = self.data.get_data(ti-self.dtw, ti+(Nw-1)*self.dto)[self.data_streams]\n\n # create windows\n dfs = []\n for i in range(i0, i1):\n dfi = df[:].iloc[i*(self.iw-self.io):i*(self.iw-self.io)+self.iw]\n try:\n dfi['id'] = pd.Series(np.ones(self.iw, dtype=int)*i, index=dfi.index)\n except ValueError:\n print('hi')\n dfs.append(dfi)\n df = pd.concat(dfs)\n window_dates = [ti + i*self.dto for i in range(Nw)]\n return df, window_dates[i0:i1]", "def bind(self, exp_to_use,window):\n for gene_name in self.genes_name_list:\n gene_data = self.data[gene_name]\n max_position = gene_data[exp_to_use].idxmax()\n motif = ''.join(list(gene_data[max_position-window:max_position+window]['nucleotide']))\n if len(motif) >= 8:\n print '>'+gene_name\n print motif + '\\n'\n # print gene_data[max_position-window:max_position+window]" ]
[ "0.5770418", "0.53267133", "0.52984023", "0.51812553", "0.51691467", "0.51118696", "0.5079057", "0.50481063", "0.50467324", "0.5036095", "0.50196743", "0.50149506", "0.49723238", "0.49606135", "0.49236315", "0.49137327", "0.48717156", "0.48612767", "0.48102915", "0.48091227", "0.47925404", "0.47831523", "0.47766387", "0.47659022", "0.47584647", "0.4756608", "0.47518867", "0.47471407", "0.47261307", "0.47094387" ]
0.7655855
0
Tests a given genomic region for enrichment in insertions.
def test_region(
        insertions,  # type: List[Insertion]
        reference_seq,  # type: pyfaidx.Fasta
        region,  # type: Tuple[str, int, int]
        pattern=None,  # type: Optional[str]
        intervals=None,  # type: Optional[Iterable[Tuple[str, int, int]]]
        total=None,  # type: Optional[int]
        filters=None,  # type: Optional[List[Callable]]
        insertion_trees=None  # type: GenomicIntervalTree
):  # type: (...) -> float
    if total is None:
        total = count_total(
            reference_seq, pattern=pattern, intervals=intervals)

    # Count pattern in region.
    region_count = count_region(reference_seq, region=region, pattern=pattern)

    # Sub-select insertions for region.
    if insertion_trees is None:
        insertion_trees = GenomicIntervalTree.from_objects_position(
            insertions, chrom_attr='seqname')

    region_ins = set(interval[2]
                     for interval in insertion_trees.search(*region))

    # Apply additional filter functions to insertions if given
    # (such as filtering on gene name/id for example).
    if filters is not None:
        for filter_func in filters:
            region_ins = set(ins for ins in region_ins if filter_func(ins))

    # Calculate p-value.
    x = len(list(region_ins))
    mu = len(insertions) * (region_count / total)

    # Note here we use loc=1, because we are interested in
    # calculating P(X >= x), not P(X > x) (the default
    # survival function).
    p_val = poisson.sf(x, mu=mu, loc=1)  # type: float

    return p_val
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_build_genomic_regions(self):\n\n CDS = pybedtools.BedTool(\"\"\"chr1\\t7700\\t7900\\tfoo\\t0\\t+\\n\n chr1\\t7999\\t8500\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n UTR5 = pybedtools.BedTool(\"\"\"chr1\\t7499\\t7700\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n UTR3 = pybedtools.BedTool(\"\"\"chr1\\t8500\\t9000\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n proxintron = pybedtools.BedTool(\"\"\"chr1\\t100\\t300\\tfoo\\t0\\t+\\n\n chr1\\t798\\t998\\tfoo\\t0\\t+\\n\n chr1\\t2000\\t2200\\tfoo\\t0\\t+\\n\n chr1\\t2798\\t2998\\tfoo\\t0\\t+\\n\n chr1\\t6000\\t6200\\tfoo\\t0\\t+\\n\n chr1\\t6798\\t6998\\tfoo\\t0\\t+\\n\n chr1\\t7900\\t7998\\tfoo\\t0\\t+\\n\"\"\", from_string = True\n )\n distintron = pybedtools.BedTool(\"\"\"chr1\\t301\\t797\\tfoo\\t0\\t+\\n\n chr1\\t2201\\t2797\\tfoo\\t0\\t+\\n\n chr1\\t6201\\t6797\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n \n regions = build_genomic_regions(pybedtools.BedTool(clipper.test_file(\"test.gtf\")), prox_distance=200) \n \n #print UTR3\n\n #print regions['UTR3']\n print proxintron\n print regions['proxintron']\n #print regions['distintron']\n \n self.assertEqual(len(CDS.intersect(regions['CDS'], f= 1.0, r = True)), 2)\n self.assertEqual(len(UTR5.intersect(regions['UTR5'], f= 1.0, r = True)), 1)\n self.assertEqual(len(UTR3.intersect(regions['UTR3'], f= 1.0, r = True)), 1)\n self.assertEqual(len(proxintron.intersect(regions['proxintron'], f= 1.0, r = True)), 7)\n self.assertEqual(len(distintron.intersect(regions['distintron'], f= 1.0, r = True)), 3)", "def test_signal_regions(i07_nexus: I07Nexus, regions):\n # Note: this should probably always be a for loop with just 1 iteration.\n for i, _ in enumerate(regions):\n assert i07_nexus.signal_regions[i] == regions[i]", "async def test_genomic_insertion(test_handler, genomic_insertion,\n grch38_genomic_insertion):\n resp = await test_handler.normalize(\"NC_000017.10:g.37880993_37880994insGCTTACGTGATG\") # noqa: E501\n assertion_checks(resp.variation_descriptor, grch38_genomic_insertion,\n \"NC_000017.10:g.37880993_37880994insGCTTACGTGATG\")\n\n fixture_id = \\\n \"normalize.variation:NC_000017.10%3Ag.37880993_37880994insGCTTACGTGATG\"\n resp = await test_handler.normalize(\"17-37880993-G-GGCTTACGTGATG\")\n assert resp.variation_descriptor.id == \\\n \"normalize.variation:17-37880993-G-GGCTTACGTGATG\"\n resp.variation_descriptor.id = fixture_id\n assertion_checks(resp.variation_descriptor, grch38_genomic_insertion,\n \"17-37880993-G-GGCTTACGTGATG\")\n\n resp = await test_handler.normalize(\n \"ERBB2 g.37880993_37880994insGCTTACGTGATG\")\n assert resp.variation_descriptor.id ==\\\n \"normalize.variation:ERBB2%20g.37880993_37880994insGCTTACGTGATG\"\n resp.variation_descriptor.id = fixture_id\n assertion_checks(resp.variation_descriptor, genomic_insertion,\n \"ERBB2 g.37880993_37880994insGCTTACGTGATG\")", "def test_regions(self):\n for i, item in enumerate(self._letters_proto.item):\n for code in item.region:\n # Region codes should be alpha-2 (where possible) or alpha-3 codes as\n # defined by ISO 3166 standard.\n self.assertLess(1, len(code))\n self.assertGreater(4, len(code))\n self.assertTrue(code.isupper(), f'Letter {i}: Region code `{code}` '\n 'should be upper-case')\n if len(code) == 3:\n country = pycountry.countries.get(alpha_3=code)\n self.assertTrue(country, f'Failed to find country for code `{code}`')\n if hasattr(country, 'alpha_2'):\n self.fail(f'Letter {i}: Please use two-letter code '\n f'`{country.alpha_2}` instead of `{country.alpha_3}` '\n f'for {country.name}')\n else:\n 
country = pycountry.countries.get(alpha_2=code)\n self.assertTrue(country, f'Failed to find country for code {code}')", "def test_bad_region():\n ref_file = pkg_resources.resource_filename('m260b.test_data', 'ref_practice_W_1_chr_1.fasta')\n read_file = pkg_resources.resource_filename('m260b.test_data', 'practice_w_1.std.bad_region1.bam')\n ref_hdr, reference = read_basic_fasta(ref_file) \n read_iter = pysam.Samfile(read_file)\n chr = ref_hdr[1:].strip()\n areg = list(active_regions(read_iter, reference, chr, start_offset=0, flank=30, dfrac=1.0))\n found = False\n for region, reads in areg:\n found |= region.start <= 5769 <= region.stop\n if not found:\n raise ValueError('Window did not open around variant')", "def _process_region(self, region, writer):", "def test_avalanche_warning_by_region_simple(self):\n pass", "def test_ith_region_nxs_01(i07_nexus_object_01: I07Nexus,\n i, ith_region):\n assert i07_nexus_object_01._get_ith_region(i) == ith_region", "def test_assign_to_regions(self):\n \n tool = pybedtools.BedTool(clipper.test_file(\"FOX2Brain-05.15.09.polyATrim.adapterTrim.rmRep.sorted.rmDup.peaks.bed\"))\n \n assign_to_regions(tool=tool, \n clusters=\"test\", \n speciesFA= clipper.test_file(\"mm9.fa\"), \n regions_dir=os.path.join(clipper.test_dir(), \"regions\"), \n regions={\"exons\" : \"Exon\", \"utr3\" : \"3' UTR\", \n \"utr5\" : \"5' UTR\", \"proxintron500\" : \"Proximal Intron\", \n \"distintron500\" : \"Distal Intron\"} ,\n assigned_dir = clipper.test_dir(),\n fasta_dir = clipper.test_dir(),\n species=\"mm9\", \n nrand = 3, \n getseq=False)", "def is_in_region(location, region):\n c= count_hits_region(location, region)\n if c%2==1:\n return True\n else:\n return False", "def assertRegionsEqual(self, expected_region, actual_region, msg=None):\n if (expected_region.size() == 1) and (actual_region.size() == 1):\n expected_region = _make_region(self.view, expected_region.begin(), expected_region.end())\n actual_region = _make_region(self.view, actual_region.begin(), actual_region.end())\n self.assertEqual(expected_region, actual_region, msg)", "def test_ensure_coverage_works_on_edition(self):\n edition = self._edition()\n provider = AlwaysSuccessfulCoverageProvider(self._db)\n record = provider.ensure_coverage(edition)\n assert isinstance(record, CoverageRecord)\n assert edition.primary_identifier == record.identifier", "def check_region(deepconsensus_input: deepconsensus_pb2.DeepConsensusInput,\n species: str,\n contig_chrom: Dict[str, str]) -> Tuple[bool, bool, bool]:\n\n # Eval set contains only molecules that start and end within the bounds.\n # Train set contains only molecules that are entirely outside of the bounds.\n # Based on this logic, molecules that span the training and eval regions\n # will be thrown out entirely.\n\n if species == 'ecoli':\n assert 'ecoli' in deepconsensus_input.chrom_name\n in_train_region = between(deepconsensus_input.chrom_start, *\n dc_constants.ECOLI_REGIONS['TRAIN']) and between(\n deepconsensus_input.chrom_end, *\n dc_constants.ECOLI_REGIONS['TRAIN'])\n in_eval_region = between(deepconsensus_input.chrom_start, *\n dc_constants.ECOLI_REGIONS['EVAL']) and between(\n deepconsensus_input.chrom_end, *\n dc_constants.ECOLI_REGIONS['EVAL'])\n in_test_region = between(deepconsensus_input.chrom_start, *\n dc_constants.ECOLI_REGIONS['TEST']) and between(\n deepconsensus_input.chrom_end, *\n dc_constants.ECOLI_REGIONS['TEST'])\n\n elif species == 'human':\n assert 'ecoli' not in deepconsensus_input.chrom_name\n # Resolve the chrom name for each 
contig\n chrom_name = contig_chrom.get(deepconsensus_input.chrom_name,\n deepconsensus_input.chrom_name)\n in_train_region = chrom_name in dc_constants.HUMAN_TRAIN_REGIONS\n in_eval_region = chrom_name in dc_constants.HUMAN_EVAL_REGIONS\n in_test_region = chrom_name in dc_constants.HUMAN_TEST_REGIONS\n\n else:\n raise ValueError(\n f\"Invalid species: {species}. Must be either 'human' or 'ecoli.'\")\n\n return in_train_region, in_eval_region, in_test_region", "def region_gene_overlap(\n region_pr,\n gene_bed,\n up=100_000,\n down=100_000,\n):\n genes = pr.read_bed(gene_bed)\n # Convert to DataFrame & we don't need intron/exon information\n genes = genes.as_df().iloc[:, :6]\n\n # Get the TSS only\n genes.loc[genes[\"Strand\"] == \"+\", \"End\"] = genes.loc[\n genes[\"Strand\"] == \"+\", \"Start\"\n ]\n genes.loc[genes[\"Strand\"] == \"-\", \"Start\"] = genes.loc[\n genes[\"Strand\"] == \"-\", \"End\"\n ]\n\n # Extend up and down\n genes.loc[genes[\"Strand\"] == \"+\", \"Start\"] -= up\n genes.loc[genes[\"Strand\"] == \"+\", \"End\"] += down\n genes.loc[genes[\"Strand\"] == \"-\", \"Start\"] -= down\n genes.loc[genes[\"Strand\"] == \"-\", \"End\"] += up\n\n # Perform the overlap\n genes = pr.PyRanges(genes)\n genes = genes.join(region_pr).as_df()\n\n return genes", "def test_bkg_regions(i07_nexus: I07Nexus, regions):\n for i, _ in enumerate(regions):\n assert i07_nexus.background_regions[i] == regions[i]", "def test_job_region(self):\n inv_search = 'region:EU not region:Europe'\n spi_search = 'find region EU not continent Europe'\n self._compare_searches(inv_search, spi_search)", "def test_avalanche_warning_by_region_obs(self):\n pass", "def test_signal_regions_len(i07_nexus, regions):\n assert len(i07_nexus.signal_regions) == len(regions)", "def test_e2e(self):\n\n # Make segmentation & regions file\n seg = get_temp_file_name(extension='gtf')\n out_dir = get_temp_dir()\n iCount.genomes.segment.get_segments(self.gtf, seg, self.fai)\n iCount.genomes.segment.make_regions(seg, out_dir)\n regions = os.path.join(out_dir, iCount.genomes.segment.REGIONS_FILE)\n\n # Build STAR index:\n genome_index = get_temp_dir()\n rcode = iCount.externals.star.build_index(self.fasta, genome_index, annotation=self.gtf)\n self.assertEqual(rcode, 0)\n # Map reads:\n map_dir = get_temp_dir()\n rcode = iCount.externals.star.map_reads(\n self.reads, genome_index, out_dir=map_dir, annotation=self.gtf)\n self.assertEqual(rcode, 0)\n\n # Get bam with mapped reads:\n bam = [fname for fname in os.listdir(map_dir) if fname.startswith('Aligned')][0]\n bam = os.path.join(map_dir, bam)\n pysam.index(bam) # pylint:disable=no-member\n\n sites_single = get_temp_file_name(extension='bed.gz')\n sites_multi = get_temp_file_name(extension='bed.gz')\n skipped = get_temp_file_name(extension='bam')\n iCount.mapping.xlsites.run(bam, sites_single, sites_multi, skipped)\n\n iCount.analysis.rnamaps.run(sites_single, regions)", "def test_avalanche_warning_by_region_detail(self):\n pass", "def _sample_regions(region_rois, gt_regions, voc_sign):\n # overlaps: (rois x gt_regions)\n overlaps_gt = bbox_overlaps(\n np.ascontiguousarray(region_rois[:, 1:5], dtype=np.float),\n np.ascontiguousarray(gt_regions[:, :4], dtype=np.float))\n # gt_assignment = overlaps_gt.argmax(axis=1)\n max_overlaps_gt = overlaps_gt.max(axis=1)\n # labels = gt_regions[gt_assignment, 4:]\n fg_inds = np.where(max_overlaps_gt >= cfg.TRAIN.FG_THRESH_REGION)[0]\n bg_inds = np.where(\n (max_overlaps_gt < cfg.TRAIN.BG_THRESH_HI_REGION) & (max_overlaps_gt >= 
cfg.TRAIN.BG_THRESH_LO_REGION))[0]\n\n # ## Debug Codes\n # print('fg: {} v.s. bg:{}'.format(len(fg_inds), len(bg_inds)))\n # gt_hit_overlap = overlaps_gt.max(axis=0)\n # hit_ids = np.unique(np.where(gt_hit_overlap >= cfg.TRAIN.FG_THRESH_REGION)[0])\n # print('Recall: {} ({}/{})'.format(\n # float(len(hit_ids)) / len(gt_regions), len(hit_ids), len(gt_regions)))\n # The indices that we're selecting (both fg and bg)\n keep_inds = np.append(fg_inds, bg_inds)\n # Select sampled values from various arrays:\n labels = np.ones((len(keep_inds), gt_regions.shape[1] - 4), dtype=np.int64) * voc_sign['end']\n # Here we randomly select regions overlapped with proposed ROI more than 0.7\n gt_assignment = np.zeros(len(fg_inds), dtype=np.int64)\n for i in range(len(fg_inds)):\n gt_assignment[i] = npr.choice(np.where(overlaps_gt[fg_inds[i]] > cfg.TRAIN.FG_THRESH_REGION)[0], size=1)\n labels[i] = gt_regions[gt_assignment[i], 4:]\n\n # add start label to background and padding them with <end> sign\n labels[len(fg_inds):, 0] = voc_sign['start']\n rois = region_rois[keep_inds]\n\n targets_fg = bbox_transform(rois[:len(fg_inds), 1:5], gt_regions[gt_assignment, :4])\n bbox_inside_weights_fg = np.ones(targets_fg.shape, dtype=np.float32) * cfg.TRAIN.BBOX_INSIDE_WEIGHTS\n targets_bg = np.zeros((bg_inds.size, targets_fg.shape[1]), dtype=np.float32)\n bbox_inside_weight_bg = np.zeros(targets_bg.shape, dtype=np.float32)\n bbox_targets = np.vstack([targets_fg, targets_bg])\n bbox_inside_weight = np.vstack([bbox_inside_weights_fg, bbox_inside_weight_bg])\n\n return labels, bbox_targets, bbox_inside_weight, keep_inds", "def createSubdivRegion(*args, **kwargs)->bool:\n pass", "def establecer_region(self, region, guess, delta_ppm=(1,1)): \r\n # obtengo los indices del centro del pico.\r\n xc, yc = self.encontrar_picos(guess, delta_ppm)\r\n # obtengo las coordenadas que determinan el rectangulo donde voy a\r\n # integrar. 
\r\n x_lims, y_lims = self.establecer_limites(xc, yc)\r\n \r\n xi,xf = x_lims\r\n yi,yf = y_lims\r\n spec = self.spec[yi:yf, xi:xf]\r\n ppmGridDir = self.ppmGridDir[yi:yf, xi:xf]\r\n ppmGridInd = self.ppmGridInd[yi:yf, xi:xf]\r\n \r\n \r\n n, m = region\r\n self.regiones[n][m] = Region(ppmGridDir, ppmGridInd, spec)", "def test_ctgs(\n insertions, # type: List[Insertion]\n reference, # type: Reference\n gene_ids=None, # type: Set[str]\n chromosomes=None, # type: Set[str]\n pattern=None, # type: str\n per_sample=True, # type: bool\n window=None #type: Tuple[int, int]\n):\n\n # Default to shared chromosome sequences (typically drops some\n # of the more esoteric extra scaffold/patch sequences).\n if chromosomes is None:\n reference_seq = pyfaidx.Fasta(str(reference.fasta_path))\n reference_gtf = GtfIterator(reference.indexed_gtf_path)\n\n chromosomes = list(\n set(reference_seq.keys()) & set(reference_gtf.contigs))\n\n if len(chromosomes) == 0:\n ValueError('No chromosomes are shared between the reference '\n 'sequence and reference gtf files')\n\n if len(chromosomes) == 0:\n raise ValueError('At least one chromosome must be given')\n\n # Determine gene windows using GTF.\n logging.info('Generating gene windows')\n gene_windows = _build_gene_windows(\n reference.indexed_gtf_path, window=window, chromosomes=chromosomes)\n\n # Subset insertions to gene intervals.\n insertions = _subset_to_windows(insertions, gene_windows)\n\n if gene_ids is None:\n gene_ids = set(ins.metadata['gene_id'] for ins in insertions)\n\n # Collapse insertions per gene/sample (recommended).\n # Corrects for hopping/multiple detection issues.\n if per_sample:\n logging.info('Collapsing insertions')\n insertions = list(_collapse_per_sample(insertions))\n\n # Calculate total number of pattern occurrences within intervals.\n logging.info('Counting pattern occurrences')\n reference_seq = pyfaidx.Fasta(str(reference.fasta_path))\n\n total = count_total(\n reference_seq, pattern=pattern, intervals=gene_windows.values())\n\n # Calculate p-values for each gene.\n logging.info('Calculating significance for genes')\n insertion_trees = GenomicIntervalTree.from_objects_position(\n insertions, chrom_attr='seqname')\n\n p_values = {\n gene_id: test_region(\n insertions=insertions,\n reference_seq=reference_seq,\n region=gene_windows[gene_id],\n total=total,\n pattern=pattern,\n filters=[lambda ins, gid=gene_id: ins.metadata['gene_id'] == gid],\n insertion_trees=insertion_trees)\n for gene_id in gene_ids\n }\n\n # Build result frame.\n result = pd.DataFrame.from_records(\n iter(p_values.items()), columns=['gene_id', 'p_value'])\n\n # Calculate corrected p-value using bonferroni correction.\n result['q_value'] = (result['p_value'] * len(result)).clip_upper(1.0)\n\n # Sort by q-value and p-value.\n result.sort_values(by=['q_value', 'p_value'], inplace=True)\n\n if len(insertions) > 0:\n # Annotate with gene_name if possible.\n if 'gene_name' in insertions[0].metadata:\n name_map = {\n ins.metadata['gene_id']: ins.metadata['gene_name']\n for ins in insertions\n }\n result.insert(1, 'gene_name', result['gene_id'].map(name_map))\n else:\n result['gene_name'] = np.nan\n\n # Annotate with frequency.\n frequency = (Insertion.to_frame(insertions)\n .groupby('gene_id')['sample'].nunique()\n .reset_index(name='n_samples'))\n result = pd.merge(result, frequency, on='gene_id', how='left')\n else:\n result['gene_name'] = np.nan\n result['n_samples'] = np.nan\n\n return result", "def __test_region(self, bk):\n for arg in self.args['region']:\n ds = 
ArgoDataFetcher(backend=bk).region(arg).to_xarray()\n assert isinstance(ds, xr.Dataset) == True", "def geneExonicRegions(self, df):\n scaffold = df.iloc[0].scaffold\n strand = df.iloc[0].strand\n gene_type = df.iloc[0].gene_type\n gene_id = df.iloc[0].gene_id\n gene_name = df.iloc[0].gene_name\n start = df.start.min()\n end = df.end.max()\n bp = [False] * (end - start + 1)\n for i in range(df.shape[0]):\n s = df.iloc[i]['start'] - start\n e = df.iloc[i]['end'] - start + 1\n bp[s:e] = [True] * (e - s)\n regions = list(range(start, end + 1))\n groups = []\n\n for i, j in groupby(bp):\n groups.append((i, len(list(j))))\n e_start = 0\n\n for i in groups:\n e_end = e_start + i[1]\n if i[0]:\n record = Record(scaffold=scaffold, start=regions[e_start],\n end=regions[e_end - 1], gene_type=gene_type, gene_id=gene_id,\n gene_name=gene_name, strand=strand)\n yield record\n e_start += i[1]", "def region(self, gnuc_beg, gnuc_end):\n # check if gnuc_beg and gnuc_end are inside the genomic region\n pexon = None\n overlapping_exons = []\n for exon in self.exons:\n if (exon[0] <= gnuc_beg and exon[1] >= gnuc_end):\n _cds_beg = min(self.cds_beg, self.cds_end)\n _cds_end = max(self.cds_beg, self.cds_end)\n\n if gnuc_beg > _cds_beg and gnuc_end < _cds_end:\n return 'Coding'\n elif gnuc_beg < _cds_beg and gnuc_end < _cds_beg:\n return \"5'UTR\" if self.strand == '+' else \"3'UTR\"\n elif gnuc_beg > _cds_end and gnuc_end > _cds_end:\n return \"3'UTR\" if self.strand == '+' else \"5'UTR\"\n elif gnuc_beg < _cds_beg:\n return \"5'UTR;coding\" if self.strand == '+' else \"3'UTR;coding\"\n elif gnuc_end > _cds_end:\n return \"coding;3'UTR\" if self.strand == '+' else \"coding;5'UTR\"\n else:\n return \"Unknown\"\n if exon[0] >= gnuc_beg and exon[0] <= gnuc_end:\n overlapping_exons.append(exon)\n if pexon and gnuc_beg > pexon[1] and gnuc_end < exon[0]:\n return 'Intronic'\n pexon = exon\n\n if overlapping_exons:\n return 'Intronic;Exonic'\n else:\n return 'Unknown'", "def test_region_check(self):\n reference = {'region': 'reference'}\n target = {'region': 'target'}\n\n # Check that IOError is raised for nonmatching regions\n self.assertRaises(IOError, librad_drift.RadiometricDrift.check_fields, reference, target)\n\n # Check no error raised if regions match\n librad_drift.RadiometricDrift.check_fields(reference, reference)", "def test_center_region(self):\n before_b = \"\"\"\\\n Some 90% of all presidentially declared disasters are weather related,\n leading to around 500 deaths per year and nearly $14 billion in damage.\n StormReady, a program started in 1999 in Tulsa, OK,\n helps arm America's communities with the communication and safety\n skills needed to save lives and property– before and during the event.\n StormReady helps community leaders and emergency managers strengthen local safety programs.\n \"\"\"\n after_b = \"\"\"\\\n Some 90% of all presidentially declared disasters are weather related,\n leading to around 500 deaths per year and nearly $14 billion in damage.\n StormReady, a program started in 1999 in Tulsa, OK,\n helps arm America's communities with the communication and safety\n skills needed to save lives and property– before and during the event.\n StormReady helps community leaders and emergency managers strengthen local safety programs.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"7.0\"),\n after_sel=(\"1.0\", \"7.0\"),\n command_name=\"center-region\",\n directives=\"@pagewidth 70\",\n )", "def validate_det1_region(regfile):\n err=-1\n import 
regions\n# from regions.io.ds9.read import DS9Parser\n from regions import Regions\n assert os.path.isfile(regfile), f'{regfile} does not exist!'\n \n# with open(regfile) as fh: \n# region_string = fh.read()\n# parser = DS9Parser(region_string)\n# assert parser.coordsys == 'image', \\\n# f'Region coordinate system is {parser.coordsys}, not image!'\n\n reg = Regions.read(regfile)\n\n\n # Check and make sure this is a \"pixel\" region and not a \"sky\" region\n\n assert 'Pixel' in f'{type(reg[0])}', \\\n f'Region coordinate system is not image coordinates for {regfile}\\n'\n\n # Check to make sure tha the first region in the file is an \"include\" region\n for ri in reg:\n assert ri.meta['include'] is True, \\\n f'\\n {regfile} has an exclusion region first! \\n Put the source region first instead!'\n break" ]
[ "0.6306166", "0.5873809", "0.5715272", "0.5644633", "0.5633991", "0.5508607", "0.55017656", "0.5482008", "0.53994864", "0.53851366", "0.53696203", "0.53695005", "0.53581506", "0.5352486", "0.5298148", "0.5286712", "0.52787757", "0.5276704", "0.52690274", "0.5268633", "0.52497023", "0.5241601", "0.5193902", "0.5164972", "0.5160878", "0.51478887", "0.51445574", "0.51347893", "0.51325184", "0.51126933" ]
0.6274175
1
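The test_region record above boils down to a Poisson tail test: the expected number of insertions in a region is the genome-wide insertion count scaled by the region's share of pattern sites. Below is a minimal, self-contained sketch of that calculation; the four counts are hypothetical stand-ins for what count_total, count_region and the interval lookup would produce from a real reference, and only scipy.stats.poisson is required.

# Hypothetical counts standing in for values derived from a reference genome.
from scipy.stats import poisson

n_insertions = 500        # insertions observed genome-wide (assumption)
total_sites = 2_000_000   # pattern occurrences genome-wide (assumption)
region_sites = 400        # pattern occurrences inside the tested region (assumption)
region_hits = 12          # insertions falling inside the region (assumption)

# Expected insertions in the region under a uniform model.
mu = n_insertions * (region_sites / total_sites)

# loc=1 shifts the survival function so the observed count is included
# in the tail, i.e. P(X >= region_hits) rather than P(X > region_hits),
# mirroring the comment in the record above.
p_value = poisson.sf(region_hits, mu=mu, loc=1)
print(mu, p_value)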
Counts occurrences of pattern within given genomic region.
def count_region(
        reference_seq,  # type: pyfaidx.Fasta
        region,  # type: Tuple[str, int, int]
        pattern=None  # type: Optional[str]
):  # type: (...) -> int
    chrom, start, end = region
    seq = reference_seq[chrom][int(start):int(end)]
    return _count_sequence(seq, regex=_build_regex(pattern))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count(pattern, string, overlapping=True, sensitive=True, regexp=False):\n return len(SE.findall(pattern, string, overlapping, sensitive, regexp))", "def pattern_count(DNA, pattern, start=0, end=0, mutation_thresh=0):\n if start < 0 or start >= len(DNA):\n raise ValueError(\"The starting position should be between 0 and the size \" + \\\n \"of the DNA\")\n\n k = len(pattern)\n count = 0\n end = len(DNA) - k + 1 if end == 0 else end\n\n for i in range(0, end):\n if hamming_distance(DNA[i:i+k], pattern) <= mutation_thresh:\n count += 1\n\n return count", "def count_occurrences(text, pattern, d=0):\n return len(find_occurrences(text, pattern, d))", "def CountOccurrences(pattern, bwt, starts, occ_counts_before):\n # Implement this function yourself\n return 0", "def count_patterns(pattern, file):\n count = 0\n with open(file, 'r') as f:\n for line in f:\n if re.search(pattern, line):\n count += 1\n print(\"The pattern '{}' appears {} times.\".format(pattern, count))", "def count_pattern(sentence, pattern):\n n = len(pattern)\n counter = 0\n for i in range(len(sentence) - n + 1):\n if sentence[i:i+n] == pattern:\n counter += 1\n\n return counter", "def count_hits_region(location, region):\n l=len(region)\n c=0\n for i in range(0,l-1):\n if hits_border(location,region[i],region[i+1])==True:\n c=c+1\n return c", "def PatternCount(text, pattern):\n\n count = 0\n for i in range(0, len(text)-len(pattern)+1):\n if text[i:i+len(pattern)] == pattern:\n count += 1\n return count", "def get_pattern_count(sequence, pattern):\n return len(re.findall(r'(?=' + pattern + ')', sequence))", "def pattern_count(text, pattern):\n return len([i\n for i in range(0, len(text) - len(pattern) + 1)\n if text[i:i + len(pattern)] == pattern])", "def pattern_count(text, pattern):\n\n count = 0\n len_text = len(text)\n len_pattern = len(pattern)\n for i in range(len_text - len_pattern):\n if pattern in text[i:i + len_pattern]:\n count = count + 1\n else:\n continue\n return count", "def CountOccurrences(pattern, bwt, starts, occ_counts_before, suffix_array):\r\n # 0$ 1A 2T 3G 4C\r\n letters = {'$':0, 'A':1, 'T':2, 'G':3, 'C':4}\r\n top=0\r\n bottom = len(bwt)-1\r\n matches_index = []\r\n while True:\r\n if len(pattern)!=0:\r\n char = pattern[-1]\r\n j = letters[char]\r\n pattern = pattern[:-1]\r\n found=False\r\n for i in range(top,bottom+1):\r\n if bwt[i] == char:\r\n top = occ_counts_before[i][j] + starts[char] -1\r\n bottom = occ_counts_before[bottom][j] + starts[char] -1\r\n found = True\r\n break\r\n\r\n if found==False:\r\n return matches_index\r\n # when pattern is finished proccessing\r\n else:\r\n for i in range(top, bottom+1):\r\n matches_index.append(suffix_array[i])\r\n return matches_index\r\n\r\n return matches_index", "def parse_file_count(path, args):\n try:\n fisier = open(path, 'r')\n except IOError:\n print(\"Nu am putut deschide fisierul :\", path)\n return\n n_found = 0\n pattern = args.pattern\n for line in fisier:\n if args.ignore_case:\n line = line.lower()\n pattern = pattern.lower()\n n_found += line.count(pattern)\n\n fisier.close()\n return n_found", "def count_regexp_occ(regexp=\"\", text=None):\n return len(re.findall(regexp, text))", "def count_regexp_occ(regexp=\"\", text=None):\n return len(re.findall(regexp, text))", "def contig_count(contig):\n return sum([1 for line in open(contig, 'rU').readlines() if line.startswith('>')])", "def utr3_motif_counts(self, pattern):\n return len(re.findall(pattern.upper(), self.three_prime_utr_sequence.upper()))", "def annotate_pattern_occurrences(\n 
record, pattern, feature_type=\"misc_feature\", prefix=\"!\"\n):\n new_record = deepcopy(record)\n label = prefix + str(pattern)\n for location in pattern.find_matches(str(record.seq)):\n annotate_record(\n new_record,\n location=(location.start, location.end),\n feature_type=feature_type,\n label=label,\n )\n return new_record", "def count_locs(file_type, comment_pattern):\n find = \"find . -name '*.{0}' -print0\".format(file_type)\n sed_pattern = \"'/^\\s*{0}/d;/^\\s*$/d'\".format(comment_pattern)\n\n cmd = \"{0} | xargs -0 sed {1} | wc -l\".format(find, sed_pattern)\n\n return check_output(cmd, shell = True).decode('utf-8').replace('\\n', '')", "def utr5_motif_counts(self, pattern):\n return len(re.findall(pattern.upper(), self.five_prime_utr_sequence.upper()))", "def count_total(\n reference_seq, # type: pyfaidx.Sequence\n pattern=None, # type: str\n intervals=None # type: Iterable[Tuple[str, int, int]]\n): # type: (...) -> int\n\n regex = _build_regex(pattern)\n\n if intervals is None:\n # Simply count for the entire sequence.\n count = sum(_count_sequence(reference_seq[seq], regex=regex)\n for seq in reference_seq.keys()) # yapf: disable\n else:\n # Flatten intervals, and then only count for sequences\n # within the flattened intervals.\n merged_intervals = list(merge_genomic_intervals(intervals))\n\n seqs = [\n reference_seq[chrom][start:end]\n for chrom, start, end in merged_intervals\n ]\n\n count = sum(_count_sequence(seq, regex=regex) for seq in seqs)\n\n return count", "def get_count(self):\n\n return len(self._pattern)", "def get_multi_pattern_count(word, patterns):\n\n distinct_positions = set()\n for pattern in patterns:\n result = Util.find_all_occurrences_knuth_morris_pratt(pattern,\n word)\n distinct_positions |= set(result)\n\n return distinct_positions", "def _count_sequence(sequence, regex=None):\n # type: (pyfaidx.Sequence, Pattern[str]) -> int\n\n if regex is None:\n count = len(sequence)\n else:\n count = sum((1 for _ in regex.finditer(str(sequence))))\n\n return count", "def find_occurrences(text, pattern, d=0):\n idx_of_last_pattern = len(text) - len(pattern)\n return [i for i in range(idx_of_last_pattern + 1) if hamming(text[i:i + len(pattern)], pattern) <= d]", "def count(grid):\n star='@'\n c = 0\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j]==star: c += 1\n return c", "def count(seats: List[str]) -> int:\n # Map dimensions\n m = len(seats)\n n = len(seats[0]) if m else 0\n \n count = 0\n \n # Count locations filled with \"#\"\n for i in range(m):\n for j in range(n):\n if seats[i][j] == \"#\":\n count += 1\n\n return count", "def highlight_pattern(self, pad, pattern,\n tag, start=\"1.0\", end=\"end\", regexp=False):\n start = pad.index(start)\n end = pad.index(end)\n pad.mark_set(\"matchStart\", start)\n pad.mark_set(\"matchEnd\", start)\n pad.mark_set(\"searchLimit\", end)\n\n count = GUI.IntVar()\n while True:\n index = pad.search(pattern, \"matchEnd\", \"searchLimit\", count=count,\n regexp=regexp)\n if index == \"\":\n break\n pad.mark_set(\"matchStart\", index)\n pad.mark_set(\"matchEnd\", \"%s+%sc\" % (index, count.get()))\n pad.tag_add(tag, \"matchStart\", \"matchEnd\")", "def count(self, contig=None, start=None, stop=None, region=None,\n until_eof=False, tid=None, read_callback='nofilter',\n reference=None, end=None):\n\n # pass the signature to fetch\n signature = locals()\n signature.pop('read_callback')\n signature.pop('self')\n roi_reads = self.fetch(**signature)\n # make `nofilter` the default filter unless told 
otherwise\n # read_callback = kwargs.get('read_callback', 'nofilter')\n\n # go through all the reads over a given region and count them\n count = 0\n for read in roi_reads:\n if filter_read(read, read_callback):\n count += 1\n return count", "def count(sub_stng, stng):\n instance_count = 0\n start_index = 0\n while stng.find(sub_stng, start_index) != -1:\n instance_count += 1\n start_index = stng.find(sub_stng, start_index) + 1\n\n return instance_count" ]
[ "0.690619", "0.6891801", "0.6734169", "0.661228", "0.64735407", "0.64611524", "0.645101", "0.6441295", "0.643269", "0.63974696", "0.6269934", "0.6190485", "0.61015546", "0.5953266", "0.5953266", "0.58486587", "0.57067573", "0.56916803", "0.56484526", "0.56377214", "0.56303567", "0.5600146", "0.55988044", "0.5591743", "0.55828637", "0.5572805", "0.55492663", "0.5517274", "0.550821", "0.54543614" ]
0.7694619
0
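The count_region record above slices one chromosome of a pyfaidx.Fasta and counts regex matches in that slice. The sketch below shows the same slice-then-count idea with a plain dict of strings instead of an indexed FASTA, so it runs without any genome file; the toy "chromosome", the region tuple and the 'TA' pattern are all assumptions made for illustration.

# Stand-in for count_region using a plain string as the reference sequence.
import re

reference = {'1': 'TTAACGTATTATTAGGCCTA' * 50}   # toy chromosome (assumption)
chrom, start, end = ('1', 100, 400)              # region tuple, as in count_region

regex = re.compile('TA')                         # example pattern (assumption)
region_seq = reference[chrom][start:end]
count = sum(1 for _ in regex.finditer(region_seq))
print(count)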
Counts occurrences of pattern in sequence.
def _count_sequence(sequence, regex=None):
    # type: (pyfaidx.Sequence, Pattern[str]) -> int
    if regex is None:
        count = len(sequence)
    else:
        count = sum((1 for _ in regex.finditer(str(sequence))))
    return count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pattern_count(sequence, pattern):\n return len(re.findall(r'(?=' + pattern + ')', sequence))", "def count_pattern(sentence, pattern):\n n = len(pattern)\n counter = 0\n for i in range(len(sentence) - n + 1):\n if sentence[i:i+n] == pattern:\n counter += 1\n\n return counter", "def count_occurrences(text, pattern, d=0):\n return len(find_occurrences(text, pattern, d))", "def count(seq):\n\treturn sum(1 for x in seq)", "def pattern_count(text, pattern):\n return len([i\n for i in range(0, len(text) - len(pattern) + 1)\n if text[i:i + len(pattern)] == pattern])", "def count(pattern, string, overlapping=True, sensitive=True, regexp=False):\n return len(SE.findall(pattern, string, overlapping, sensitive, regexp))", "def PatternCount(text, pattern):\n\n count = 0\n for i in range(0, len(text)-len(pattern)+1):\n if text[i:i+len(pattern)] == pattern:\n count += 1\n return count", "def CountOccurrences(pattern, bwt, starts, occ_counts_before):\n # Implement this function yourself\n return 0", "def pattern_count(text, pattern):\n\n count = 0\n len_text = len(text)\n len_pattern = len(pattern)\n for i in range(len_text - len_pattern):\n if pattern in text[i:i + len_pattern]:\n count = count + 1\n else:\n continue\n return count", "def pattern_count(DNA, pattern, start=0, end=0, mutation_thresh=0):\n if start < 0 or start >= len(DNA):\n raise ValueError(\"The starting position should be between 0 and the size \" + \\\n \"of the DNA\")\n\n k = len(pattern)\n count = 0\n end = len(DNA) - k + 1 if end == 0 else end\n\n for i in range(0, end):\n if hamming_distance(DNA[i:i+k], pattern) <= mutation_thresh:\n count += 1\n\n return count", "def count_patterns(pattern, file):\n count = 0\n with open(file, 'r') as f:\n for line in f:\n if re.search(pattern, line):\n count += 1\n print(\"The pattern '{}' appears {} times.\".format(pattern, count))", "def get_count(self):\n\n return len(self._pattern)", "def CountOccurrences(pattern, bwt, starts, occ_counts_before, suffix_array):\r\n # 0$ 1A 2T 3G 4C\r\n letters = {'$':0, 'A':1, 'T':2, 'G':3, 'C':4}\r\n top=0\r\n bottom = len(bwt)-1\r\n matches_index = []\r\n while True:\r\n if len(pattern)!=0:\r\n char = pattern[-1]\r\n j = letters[char]\r\n pattern = pattern[:-1]\r\n found=False\r\n for i in range(top,bottom+1):\r\n if bwt[i] == char:\r\n top = occ_counts_before[i][j] + starts[char] -1\r\n bottom = occ_counts_before[bottom][j] + starts[char] -1\r\n found = True\r\n break\r\n\r\n if found==False:\r\n return matches_index\r\n # when pattern is finished proccessing\r\n else:\r\n for i in range(top, bottom+1):\r\n matches_index.append(suffix_array[i])\r\n return matches_index\r\n\r\n return matches_index", "def find_occurrences(text, pattern, d=0):\n idx_of_last_pattern = len(text) - len(pattern)\n return [i for i in range(idx_of_last_pattern + 1) if hamming(text[i:i + len(pattern)], pattern) <= d]", "def parse_file_count(path, args):\n try:\n fisier = open(path, 'r')\n except IOError:\n print(\"Nu am putut deschide fisierul :\", path)\n return\n n_found = 0\n pattern = args.pattern\n for line in fisier:\n if args.ignore_case:\n line = line.lower()\n pattern = pattern.lower()\n n_found += line.count(pattern)\n\n fisier.close()\n return n_found", "def counts(self, regex = \"\\w+\"): \n tokenizer = RegexpTokenizer(r'{}'.format(regex))\n count = []\n for i in tqdm(self.text):\n count.append(len(tokenizer.tokenize(i)))\n return count", "def support_count(pattern, D):\n support_count = 0\n tmp_p = set(pattern)\n for transaction in D:\n if tmp_p <= 
set(transaction):\n support_count += 1\n return support_count", "def count(seq):\n\n if not seq:\n return 0\n elif isinstance(seq[0], list):\n return count(seq[0]) + count(seq[1:])\n else:\n return 1 + count(seq[1:])", "def count_total(\n reference_seq, # type: pyfaidx.Sequence\n pattern=None, # type: str\n intervals=None # type: Iterable[Tuple[str, int, int]]\n): # type: (...) -> int\n\n regex = _build_regex(pattern)\n\n if intervals is None:\n # Simply count for the entire sequence.\n count = sum(_count_sequence(reference_seq[seq], regex=regex)\n for seq in reference_seq.keys()) # yapf: disable\n else:\n # Flatten intervals, and then only count for sequences\n # within the flattened intervals.\n merged_intervals = list(merge_genomic_intervals(intervals))\n\n seqs = [\n reference_seq[chrom][start:end]\n for chrom, start, end in merged_intervals\n ]\n\n count = sum(_count_sequence(seq, regex=regex) for seq in seqs)\n\n return count", "def count(seq, predicate):\n count = 0\n for item in seq:\n if predicate(item):\n count += 1\n return count", "def CountAppStrMatch(pattern, text, d, debug = False):\n\tcount = 0\n\tif debug:\n\t\tprint len(text)-len(pattern)+1\n\tfor i in range(len(text)-len(pattern)+1):\n\t\tif debug:\n\t\t\tprint text[i:i+len(pattern)]\n\t\t\tprint HammingDist(text[i:i+len(pattern)], pattern)\n\t\tif HammingDist(text[i:i+len(pattern)], pattern) <= d:\n\t\t\tcount += 1\n\treturn count", "def utr3_motif_counts(self, pattern):\n return len(re.findall(pattern.upper(), self.three_prime_utr_sequence.upper()))", "def occurrences_re(pattern, string):\n exp = re.compile(pattern)\n o = []\n for i in exp.finditer(string):\n o.append([i.start(), i.end()])\n return o", "def count_regexp_occ(regexp=\"\", text=None):\n return len(re.findall(regexp, text))", "def count_regexp_occ(regexp=\"\", text=None):\n return len(re.findall(regexp, text))", "def counts(sequence):\n # initialize the countainer\n count = defaultdict(int)\n # iterates through sequence elements\n for item in sequence:\n # if element not in counts add 0\n # else add 1\n count[item] = count.get(item, 0) + 1\n return dict(count)", "def get_terminals_count(self, sequence: str) -> int:\n\n res = 0\n\n for terminal in self._terminals:\n if terminal != '':\n res += sequence.count(terminal)\n\n return res", "def at_frequency(self):\n result = str(self.seq).count(\"A\") + str(self.seq).count(\"T\")\n return result", "def utr5_motif_counts(self, pattern):\n return len(re.findall(pattern.upper(), self.five_prime_utr_sequence.upper()))", "def get_multi_pattern_count(word, patterns):\n\n distinct_positions = set()\n for pattern in patterns:\n result = Util.find_all_occurrences_knuth_morris_pratt(pattern,\n word)\n distinct_positions |= set(result)\n\n return distinct_positions" ]
[ "0.8225791", "0.757032", "0.74906814", "0.7362785", "0.7350869", "0.73212504", "0.7278474", "0.7140979", "0.707895", "0.6971106", "0.6928305", "0.6643842", "0.65604806", "0.64908123", "0.6395848", "0.6317644", "0.6313916", "0.6293871", "0.62750435", "0.62558323", "0.6236206", "0.62350285", "0.61474967", "0.6138225", "0.6138225", "0.61251825", "0.6114028", "0.60748726", "0.60699207", "0.60663867" ]
0.76051104
1
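The _count_sequence record above has two branches: with no pattern it returns the sequence length, otherwise it counts non-overlapping regex matches. A tiny illustration on a plain string (a pyfaidx.Sequence behaves like a string once passed through str()); the sequence and the 'TTAA' pattern are illustrative assumptions.

import re

seq = 'TTAACGTTAATTAA'

# With no regex, the "count" is simply the sequence length.
print(len(seq))                            # 14

# With a regex, non-overlapping matches are counted via finditer.
ttaa = re.compile('TTAA')
print(sum(1 for _ in ttaa.finditer(seq)))  # 3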
Merges overlapping genomic intervals.
def merge_genomic_intervals(intervals):
    # type: (Iterable[Tuple[str, int, int]]) -> Iterable[Tuple[str, int, int]]

    # Group intervals by chromosome.
    grouped_intervals = itertools.groupby(
        sorted(intervals), operator.itemgetter(0))

    # Now yield merged intervals per chromosome.
    for chrom, grp in grouped_intervals:
        chrom_intervals = [interval[1:] for interval in grp]

        for low, high in merge_intervals(chrom_intervals, is_sorted=True):
            yield chrom, low, high
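The function above delegates the per-chromosome merge to a merge_intervals helper that is not shown in this record. Below is a self-contained sketch of the same group-by-chromosome-then-merge idea with a simple inline merge standing in for that helper; the function name and the example intervals are assumptions for illustration only.

import itertools
import operator

def merge_genomic_intervals_sketch(intervals):
    # Group (chrom, start, end) tuples by chromosome, then sweep each group
    # in sorted order, extending the current interval while the next one overlaps.
    for chrom, grp in itertools.groupby(sorted(intervals), operator.itemgetter(0)):
        low, high = None, None
        for _, start, end in grp:
            if low is None:
                low, high = start, end
            elif start <= high:          # overlapping (or touching) interval
                high = max(high, end)
            else:
                yield chrom, low, high
                low, high = start, end
        if low is not None:
            yield chrom, low, high

print(list(merge_genomic_intervals_sketch(
    [('1', 10, 20), ('1', 15, 30), ('2', 5, 8)])))
# [('1', 10, 30), ('2', 5, 8)]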
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_ranges():", "def test_merge_intervals():\n\n a = pybedtools.example_bedtool(\"a.bed\") # path to test file a\n # This file looks like this:\n # chr1\t1\t100\tfeature1\t0\t+\n # chr1\t100\t200\tfeature2\t0\t+\n # chr1\t150\t500\tfeature3\t0\t-\n # chr1 900\t950\tfeature4\t0\t+\n\n assert len(a) == 4\n\n b = pybedtools.example_bedtool(\"b.bed\") # path to test file b\n # This file looks like this:\n # chr1\t155\t200\tfeature5\t0\t-\n # chr1\t800\t901\tfeature6\t0\t+\n\n assert len(b) == 2\n\n merged_bed = merge_intervals([a, b])\n assert len(merged_bed) == 2\n # Merged file looks like this:\n # chr1\t1\t500\n # chr1\t800\t950", "def merge_overlapping_on_chrm_and_strand(intervals, coverage):\n sorted_by_lower_bound = sorted(intervals, key=lambda x: x.left)\n merged = []\n for higher in sorted_by_lower_bound:\n if not merged:\n merged.append(higher)\n else:\n lower = merged[-1]\n # test for intersection between lower and higher:\n # we know via sorting that lower[0] <= higher[0]\n if higher.left <= lower.right:\n upper_bound = int(max(lower.right, higher.right))\n new_peak = peak(lower.chrm, lower.left, upper_bound, lower.strand)\n new_peak.height = 0\n window = HTSeq.GenomicInterval(lower.chrm, lower.left, upper_bound, lower.strand)\n wincvg = np.fromiter(coverage[window], dtype='i')\n new_peak.height = int(max(wincvg))\n merged[-1] = new_peak # replace by merged interval\n else:\n merged.append(higher)\n return merged", "def merge_overlapping(self, stat=intervals_weighted_mean, sort=True):\n if sort:\n bed = self\n else:\n bed = self.sort()\n\n current_intervals = []\n for interval in bed:\n if len(current_intervals) == 0 or (current_intervals[-1].start < interval.end and\n current_intervals[-1].end > interval.start and\n current_intervals[-1].chrom == interval.chrom):\n current_intervals.append(interval)\n else:\n # merge\n intervals = np.array([(current.start, current.end,\n float(current.score) if current.score != '.' 
else np.nan)\n for current in current_intervals])\n merged_score = \"{:0.6f}\".format(stat(intervals))\n merged_strand = current_intervals[0].strand\n merged_start = min(intervals[:, 0])\n merged_end = max(intervals[:, 1])\n merged_chrom = current_intervals[0].chrom\n merged_name = current_intervals[0].name\n merged_interval = pybedtools.Interval(merged_chrom, merged_start, merged_end, name=merged_name,\n score=merged_score, strand=merged_strand)\n current_intervals = [interval]\n yield merged_interval", "def merge_overlapping_regions(regions):\n sorted_regions = sorted(regions, key=lambda r: (r.chromosome, r.start))\n\n merged_regions = []\n current_regions = []\n last_end = None\n for region in sorted_regions:\n if len(current_regions) == 0:\n current_regions.append(region)\n last_end = region.end\n elif region.chromosome == current_regions[0].chromosome and region.start < last_end:\n current_regions.append(region)\n last_end = max(last_end, region.end)\n else:\n merged_region = GenomicRegion(chromosome=current_regions[0].chromosome,\n start=current_regions[0].start, end=last_end,\n strand=current_regions[0].strand)\n merged_regions.append(merged_region)\n current_regions = [region]\n last_end = region.end\n\n merged_region = GenomicRegion(chromosome=current_regions[0].chromosome,\n start=current_regions[0].start, end=last_end,\n strand=current_regions[0].strand)\n merged_regions.append(merged_region)\n\n return merged_regions", "def merge_df_intervals(df, iv_func=lambda iv: iv.merge_hull()):\n if not \"strand\" in df.columns:\n df = df.assign(strand=1)\n strand_added = True\n else:\n strand_added = False\n joined = _df_to_tup(df)\n\n out = []\n for chr_strand, sub_group in itertools.groupby(joined, lambda tup: tup[0]):\n args = [x[1:] for x in sub_group]\n iv = IntervalSet.from_tuples_with_id(args)\n new_order = iv_func(iv).to_tuples_last_id()\n new_df = df.iloc[[x[2] for x in new_order]].copy()\n new_df.loc[:, \"start\"] = [x[0] for x in new_order]\n new_df.loc[:, \"stop\"] = [x[1] for x in new_order]\n out.append(new_df)\n res = pd.concat(out)\n if strand_added:\n res = res.drop(\"strand\", axis=1)\n return res.sort_values([\"chr\", \"start\"])", "def mergeOverlapping(intlist):\n \n intlist.sort(key=lambda x: x.minval)\n newint=[intlist[0]]\n\n for elem in intlist[1:]:\n try:\n newint[-1]=mergeIntervals(elem,newint[-1])\n except:\n newint.append(elem)\n \n return newint", "def test_merge_demo_intervals():\n a = pybedtools.BedTool(panel1_path)\n assert len(a) == 4\n b = pybedtools.BedTool(panel2_path)\n assert len(b) == 3\n\n merged_bed = merge_intervals([a, b])\n assert len(merged_bed) == len(a) + len(b) - 1 # a and b have a shared interval", "def _combine_ind_ranges(ind_ranges_to_merge):\n ind_ranges_to_merge = sorted(ind_ranges_to_merge)\n stack = []\n result = []\n for curr in ind_ranges_to_merge:\n if len(stack) == 0:\n stack.append(curr)\n elif stack[-1][-1] >= curr[0]:\n prev = stack.pop()\n merged = sorted(list(set(prev + curr)))\n stack.append(merged)\n else:\n prev = stack.pop()\n result.append(prev)\n stack.append(curr)\n result += stack\n return result", "def test_rangesMerged(self):\n\n mergeAfter = MessageSet(1, 3)\n mergeBefore = MessageSet(6, 8)\n\n mergeBetweenSequence = mergeAfter + mergeBefore\n mergeBetweenNumber = mergeAfter + MessageSet(5, 7)\n\n self.assertEqual(list(mergeAfter + (2, 4)), [1, 2, 3, 4])\n self.assertEqual(list(mergeAfter + (3, 5)), [1, 2, 3, 4, 5])\n\n self.assertEqual(list(mergeBefore + (5, 7)), [5, 6, 7, 8])\n self.assertEqual(list(mergeBefore + 
(4, 6)), [4, 5, 6, 7, 8])\n\n self.assertEqual(list(mergeBetweenSequence + (3, 5)),\n [1, 2, 3, 4, 5, 6, 7, 8])\n self.assertEqual(list(mergeBetweenNumber + MessageSet(4)),\n [1, 2, 3, 4, 5, 6, 7])", "def test_mergeOverlapping(self):\n\n # test expected behavior for correctly formatted inputs\n int1 = interval('[1,2)')\n int2 = interval('(1,2]')\n int12 = interval('[1,2]')\n merged12 = mergeOverlapping([int1, int2])\n self.assertEqual([int12], merged12)\n int3 = interval('[3,3]')\n int13 = interval('[1,3]')\n intneg1 = interval('[-1,0)')\n int0 = interval('[0,1)')\n intneg13 = interval('[-1,3]')\n self.assertEqual([intneg13], mergeOverlapping([intneg1, int0, int13]))\n self.assertEqual([intneg1, int3], mergeOverlapping([intneg1, int3]))\n self.assertEqual([int13], mergeOverlapping([int12, int3]))\n int4 = interval('(3,4]')\n int58 = interval('[5,8]')\n intnothing = mergeOverlapping([])\n self.assertEqual([], intnothing)\n self.assertEqual([int13, int58], mergeOverlapping([int12, int3, int58]))\n self.assertEqual([int13, int58], mergeOverlapping([int58, int13]))\n self.assertEqual([int13], mergeOverlapping([int1, int2, int3]))\n self.assertEqual([int13], mergeOverlapping([int1, int2, int2, int3, int12]))\n self.assertEqual([int1], mergeOverlapping([int1]))\n\n # test expected behavior for incorrectly formatted inputs\n with self.assertRaises(ValueError):\n int1 = mergeOverlapping([int1, 4])\n with self.assertRaises(ValueError):\n int1 = mergeOverlapping([3, int1])\n with self.assertRaises(ValueError):\n int1 = mergeOverlapping([3, \"not an interval\"])\n with self.assertRaises(ValueError):\n int1 = mergeOverlapping([3, \"[1,3]\"])\n with self.assertRaises(ValueError):\n int1 = mergeOverlapping([[], \"\"])\n with self.assertRaises(ValueError):\n int1 = mergeOverlapping([[12, \"hi\"], \"interval\"])\n with self.assertRaises(ValueError):\n int1 = mergeOverlapping([int1, \"\"])\n with self.assertRaises(ValueError):\n int1 = mergeOverlapping([[], int2])\n print(\"merge overlapping list test complete\")", "def merge_peaks(peaks, peak_size, merge_overlap, chrom_len):\n max_overlap = merge_overlap\n while len(peaks) > 1 and max_overlap >= merge_overlap:\n # find largest overlap\n max_i = 0\n max_overlap = peaks[0].end - peaks[1].start\n for i in range(1, len(peaks) - 1):\n peaks_overlap = peaks[i].end - peaks[i + 1].start\n if peaks_overlap > max_overlap:\n max_i = i\n max_overlap = peaks_overlap\n\n if max_overlap >= merge_overlap:\n # merge peaks\n peaks[max_i].merge(peaks[max_i + 1], peak_size, chrom_len)\n\n # remove merged peak\n peaks = peaks[: max_i + 1] + peaks[max_i + 2 :]\n\n return peaks", "def merge(intervals):\n intervals.sort(key=lambda x: x[0])\n # take the first interval\n merged = [intervals[0]]\n # loop through all the intervals\n for this_interval in intervals:\n if this_interval[0] <= merged[-1][1]:\n merged[-1] = (merged[-1][0], max(merged[-1][1], this_interval[1]))\n else:\n merged.append(this_interval)\n return merged", "def merge_df_intervals_with_callback(df, callback):\n if not \"strand\" in df:\n df = df.assign(strand=1)\n strand_added = True\n else:\n strand_added = False\n joined = _df_to_tup(df)\n result = []\n for chr, sub_group in itertools.groupby(joined, lambda tup: tup[0]):\n args = [x[1:] for x in sub_group]\n iv = IntervalSet.from_tuples_with_id(args)\n subsets = iv.merge_hull().to_tuples_with_id()\n for s in subsets:\n sub_df = df.iloc[list(s[2])].copy()\n sub_df.at[:, \"start\"] = s[0]\n sub_df.at[:, \"stop\"] = s[1]\n row_data = callback(sub_df)\n if not 
isinstance(\n row_data, dict\n ): # and not (isinstance(row_data, pd.core.series.Series) and len(row_data.shape) == 1):\n print(\"type\", type(row_data))\n # print 'len(shape)', len(row_data.shape)\n print(callback)\n raise ValueError(\n \"Merge_function returned something other than dict (writing to the pandas series directly is very slow, call to_dict() on it, then modify it.)\"\n )\n if set(row_data.keys()) != set(df.columns):\n raise ValueError(\n \"Merge_function return wrong columns. Expected %s, was %s\"\n % (df.columns, list(row_data.keys()))\n )\n row_data[\"start\"] = s[0]\n row_data[\"stop\"] = s[1]\n\n result.append(row_data)\n res = pd.DataFrame(result).sort_values([\"chr\", \"start\"])\n if strand_added:\n res = res.drop(\"strand\", axis=1)\n return res", "def solution(intervals):\n solution = Solution()\n output = solution.merge(intervals)\n\n print(output)", "def mergeIntervals(int1,int2):\n newint=interval('(-1,1)') \n if int1.minval>int2.minval or (int2.lrbd=='(' and int1.minval==int2.minval):\n int1,int2=int2,int1\n \n if isMergeable(int1,int2):\n newrtNum=max(int1.rtnum,int2.rtnum)\n if newrtNum==int2.rtnum:\n newint=interval(int1.lrbd+str(int1.lfnum)+','+str(newrtNum)+int2.upbd)\n else:\n newint=interval(int1.lrbd+str(int1.lfnum)+','+str(newrtNum)+int1.upbd)\n\n else:\n raise Cant_be_merged('Can\\'t be merged')\n \n return newint", "def overlaps(interval,intervals):\n return [x for x in intervals if interval.overlaps(x)]", "def merge_reads(s1, s2, q1, q2, amplen):\n # If the amplicon is of length L and the reads are lengths l1, l2 then:\n # - read 1 from 0 to L-l2-1 inclusive doesn't overlap\n # - read 1 from L-l2 to l1-1 inclusive overlaps with read 2\n # - read 2 from 0 to l1+l2-L-1 inclusive overlaps with read 1\n # - read 2 from l1+l2-L to its end doesn't overlap\n\n # A picture for clarity:\n # s1 coords: 0 l1-1\n # | |\n # ----------------------------------------\n # ------------------------------\n # | | |\n # s1 coords: L-l2 | L-1\n # s2 coords: 0 l1+l2-L-1\n\n # Reverse complement read 2 and reverse its quality scores.\n s2 = reverse_complement(s2)\n q2 = q2[::-1]\n\n # This is where we'll put the merged sequence and quality score.\n s = np.zeros(amplen, dtype=np.int8)\n q = np.zeros(amplen, dtype=np.int8)\n\n # If the reads overlap correctly, then s1[offset+i] == s2[i], assuming s2 is\n # the reverse complement of the reverse read.\n offset = amplen - len(s2)\n\n # Fill in the parts of the merged sequence where the reads don't overlap.\n s[:offset] = s1[:offset]\n q[:offset] = q1[:offset]\n s[len(s1):] = s2[len(s1)+len(s2)-amplen:]\n q[len(s1):] = q2[len(s1)+len(s2)-amplen:]\n\n # Create a set of views into the overlapping region. We can directly compare\n # vs1[i] to vs2[i] and use that to fill in vs[i] with all indexing taken\n # care of.\n vs1 = s1[offset:]\n vq1 = q1[offset:]\n vs2 = s2[:len(vs1)]\n vq2 = q2[:len(vs1)]\n vs = s[offset:len(s1)]\n vq = q[offset:len(s1)]\n\n # Quality score of matching bases is the larger of the two quality\n # scores (this is a somewhat conservative low estimate). Quality\n # score of mismatched bases is the difference of the two quality\n # scores. 
If the mismatched bases have equal quality scores, the\n # base is written as an N with the minimum possible quality.\n\n # Positions where the reads agree.\n ieq = vs1 == vs2\n vs[ieq] = vs1[ieq]\n vq[ieq] = np.maximum(vq1[ieq], vq2[ieq])\n\n # Positions where the reads disagree.\n ineq = vs1 != vs2\n mismatches = ineq.sum()\n\n # Positions where the reads disagree and read 1 has the higher quality.\n ir1 = np.logical_and(ineq, vq1 > vq2)\n vs[ir1] = vs1[ir1]\n vq[ir1] = MIN_QUAL + vq1[ir1] - vq2[ir1]\n\n # Positions where the reads disagree and read 2 has the higher quality.\n ir2 = np.logical_and(ineq, vq2 > vq1)\n vs[ir2] = vs2[ir2]\n vq[ir2] = MIN_QUAL + vq2[ir2] - vq1[ir2]\n\n # Positions where the reads disagree and they have equal qualities.\n irn = np.logical_and(ineq, vq1 == vq2)\n vs[irn] = bN\n vq[irn] = MIN_QUAL\n\n return s, q, mismatches", "def mergeSeq(left, right):\n i = j = 0\n result = []\n while i < len(left) and j < len(right):\n if left[i] <= right[j]:\n result.append(left[i])\n i += 1\n else:\n result.append(right[j])\n j += 1\n\n result += left[i:]\n result += right[j:]\n return result", "def test_mergeIntervals(self):\n\n # test expected behavior for correctly formatted inputs\n int1 = interval('[1,2)')\n int2 = interval('(1,2]')\n int12 = interval('[1,2]')\n merged12 = mergeIntervals(int1, int2)\n self.assertEqual(int12, merged12)\n int3 = interval('[3,3]')\n int13 = interval('[1,3]')\n self.assertEqual(int13, mergeIntervals(int12, int3))\n int4 = interval('(3,4]')\n int58 = interval('[5,8]')\n\n # test expected behavior for incorrectly formatted inputs\n with self.assertRaises(ValueError):\n int1 = mergeIntervals(int1, int4)\n with self.assertRaises(ValueError):\n int1 = mergeIntervals(int4, int1)\n with self.assertRaises(ValueError):\n int1 = mergeIntervals(int3, int1)\n with self.assertRaises(ValueError):\n int1 = mergeIntervals(int3, int58)\n with self.assertRaises(ValueError):\n int1 = mergeIntervals(int1, 4)\n with self.assertRaises(ValueError):\n int1 = mergeIntervals(3, int1)\n with self.assertRaises(ValueError):\n int1 = mergeIntervals(3, \"not an interval\")\n with self.assertRaises(ValueError):\n int1 = mergeIntervals(3, \"[1,3]\")\n with self.assertRaises(ValueError):\n int1 = mergeIntervals([], \"\")\n with self.assertRaises(ValueError):\n int1 = mergeIntervals([12, \"hi\"], \"interval\")\n with self.assertRaises(ValueError):\n int1 = mergeIntervals(int1, \"\")\n with self.assertRaises(ValueError):\n int1 = mergeIntervals([], int2)\n print(\"merge test complete\")", "def merge_in(self, other, convert_to_string=True):\n assert isinstance(other, ExtendedAlignment)\n #_LOG.debug(\"Merging started ...\")\n if other.is_empty():\n return\n me = 0\n she = 0 # Assumption: alignments are female!\n me_len = self.get_length() if not self.is_empty() else 0\n she_len = other.get_length()\n insertion = -1\n\n merged_insertion_columns = 0\n\n ''' Add sequences from her to my alignment '''\n for f in other.fragments:\n self.fragments.add(f)\n if convert_to_string:\n self.from_string_to_bytearray()\n\n selfother = {}\n for k, v in other.items():\n # assert(k not in self,\n # \"Merging overlapping alignments not implemented\")\n if k not in self:\n selfother[k] = bytearray(v, encoding=\"utf8\")\n while True:\n ''' Check exit conditions'''\n if me == me_len and she == she_len:\n break\n\n ''' Check the 5 possible statuses between she and I '''\n if she != she_len and other.is_insertion_column(she):\n if me != me_len and self.is_insertion_column(me):\n ''' We both have a 
series of insertion columns'''\n start = me\n while(me != me_len and self.is_insertion_column(me) and\n she != she_len and other.is_insertion_column(she)):\n me += 1\n she += 1\n merged_insertion_columns += 1\n run = me - start\n self.col_labels[start:me] = list(range(\n insertion, insertion-run, -1))\n else:\n ''' Hers is a series of insertion columns'''\n start = she\n while she != she_len and other.is_insertion_column(she):\n she += 1\n run = she - start\n ins = bytearray(b\"-\") * run\n for seq in self.values():\n seq[me:me] = ins\n self._col_labels[me:me] = list(range(\n insertion, insertion - run, -1))\n insertion -= run\n me += run\n me_len += run\n elif me != me_len and self.is_insertion_column(me):\n ''' Mine is a series of insertion column'''\n start = me\n while me != me_len and self.is_insertion_column(me):\n me += 1\n run = me - start\n ins = bytearray(b\"-\") * run\n for v in selfother.values():\n v[start:start] = ins\n self.col_labels[start:me] = list(\n range(insertion, insertion-run, -1))\n insertion -= run\n elif(she == she_len or (me != me_len and\n self.col_labels[me] < other.col_labels[she])):\n ''' My column is not present (i.e. was allgap) in the\n \"other\"'''\n start = me\n while(me < me_len and (she == she_len or me != me_len and\n self.col_labels[me] < other.col_labels[she])):\n me += 1\n run = me - start\n ins = bytearray(b\"-\") * run\n for v in selfother.values():\n v[start:start] = ins\n elif(me == me_len or (she != she_len and\n self.col_labels[me] > other.col_labels[she])):\n ''' Her column is not present (i.e. was allgap) in \"me\"'''\n start = she\n while(she < she_len and (me == me_len or she != she_len and\n self.col_labels[me] > other.col_labels[she])):\n she += 1\n run = she - start\n ins = bytearray(b\"-\") * run\n for seq in self.values():\n seq[me:me] = ins\n self._col_labels[me:me] = other.col_labels[start:she]\n me += run\n me_len += run\n elif self.col_labels[me] == other.col_labels[she]:\n ''' A shared column'''\n while(me < me_len and she < she_len and\n self.col_labels[me] == other.col_labels[she]):\n she += 1\n me += 1\n else:\n raise \"hmmm, we thought this should be impossible? 
%d %d\" % (\n me, she)\n\n self.update(selfother)\n\n if convert_to_string:\n self.from_bytearray_to_string()\n #_LOG.debug(\"Merging finished ...\")\n\n return merged_insertion_columns", "def joinIntervalsSum(myIntervals,start='start',end='end',score='readcount',sampleName=\".\",offset=0):\n \n if not myIntervals: return myIntervals\n non_overlapping = []\n sep = {'+':[],'-':[]}\n \n print \"Splitting intervals by strand\"\n for i in myIntervals:\n sep[i.strand].append(i)\n \n print \"Joining intervals...\"\n for strand in sep.keys():\n print strand\n intervals = sep[strand]\n intervals.sort()\n \n \n current = copy.copy(intervals[0])\n for x in intervals[1:]:\n next = copy.copy(x)\n if current.intersects(next, start=start, end=end,offset=offset):\n current.end = max(current.end,next.end)\n current.__dict__[score] = current.__dict__[score]+next.__dict__[score]\n else:\n current.name = sampleName\n non_overlapping.append(current)\n current = copy.copy(next)\n current.name=sampleName\n non_overlapping.append(current)\n print \"Sorting intervals\"\n non_overlapping.sort()\n print \"Done\"\n return non_overlapping", "def overlaps(self, other):\n pass", "def test_overlapping_alignments_2():\n generate_bam_file(gqd.sam_content, gqd.sam_bam_prefix)\n gqd.gene_wise_quantification._min_overlap = 5\n sam = pysam.Samfile(gqd.sam_bam_prefix + \".bam\")\n # 1 overlapping base in the 5' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 1, 10))) == []\n # 4 overlapping base in the 5' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 1, 13))) == []\n # 5 overlapping base in the 5' end of the reads => okay\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 1, 14))) == [\n \"myread:01\", \"myread:02\", \"myread:03\", \"myread:04\", \"myread:05\"]\n # 1 overlapping base in the 3' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 19, 23))) == []\n # 4 overlapping base in the 3' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 16, 23))) == []\n # 5 overlapping base in the 3' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 15, 23))) == [\n \"myread:01\", \"myread:02\", \"myread:03\", \"myread:04\", \"myread:05\"]", "def region_gene_overlap(\n region_pr,\n gene_bed,\n up=100_000,\n down=100_000,\n):\n genes = pr.read_bed(gene_bed)\n # Convert to DataFrame & we don't need intron/exon information\n genes = genes.as_df().iloc[:, :6]\n\n # Get the TSS only\n genes.loc[genes[\"Strand\"] == \"+\", \"End\"] = genes.loc[\n genes[\"Strand\"] == \"+\", \"Start\"\n ]\n genes.loc[genes[\"Strand\"] == \"-\", \"Start\"] = genes.loc[\n genes[\"Strand\"] == \"-\", \"End\"\n ]\n\n # Extend up and down\n genes.loc[genes[\"Strand\"] == \"+\", \"Start\"] -= up\n genes.loc[genes[\"Strand\"] == \"+\", \"End\"] += down\n genes.loc[genes[\"Strand\"] == \"-\", \"Start\"] -= down\n genes.loc[genes[\"Strand\"] == \"-\", \"End\"] += up\n\n # Perform the overlap\n genes = pr.PyRanges(genes)\n genes = genes.join(region_pr).as_df()\n\n return genes", "def range_overlap(range1, range2):\n return range(max(range1[0], range2[0]), min(range1[1], range2[1]))", 
"def ranges_overlap(start1, end1, start2, end2):\n return start1 <= end2 and end1 >= start2", "def overlapping_ranges(\n ranges_1: Sequence[Tuple[int, int]],\n ranges_2: Sequence[Tuple[int, int]],\n) -> List[Tuple[int, int]]:\n return [\n (max(first[0], second[0]), min(first[1], second[1]))\n for first in ranges_1\n for second in ranges_2\n if max(first[0], second[0]) < min(first[1], second[1])\n ]", "def __init__(self):\n        self.intervals = []\n        \n    ### O(len(intervals))\n    def addNum(self, val: int) -> None:\n        if(len(self.intervals) == 0):\n            self.intervals.append([val, val])\n            return\n        \n        flag, left = 1, -math.inf\n        for i, interval in enumerate(self.intervals):\n            for point in interval:\n                right = point\n                if(left == val or right == val):\n                    return \n                elif(left < val and right > val):\n                    if(flag):\n                        ### merge case\n                        if(val == left+1 and val == right -1):\n                            self.intervals[i-1][1] = self.intervals[i][1]\n                            self.intervals.pop(i)\n                        elif(val == left+1):\n                            self.intervals[i-1][1] = val\n                        elif(val == right-1):\n                            self.intervals[i][0] = val\n                        else:\n                            self.intervals.insert(i, [val, val])\n                    ### val in one of the existing intervals\n                    return", "def resolveRanges( self, left_ranges, right_ranges):\n new_left_ranges = []\n new_right_ranges = []\n \n ranges = map( lambda x: (x[0], x[1], 0), left_ranges)\n ranges += map( lambda x: (x[0], x[1], 1), right_ranges)\n \n ranges.sort()\n \n last_left, last_right, last_is_right = ranges[0]\n for this_left, this_right, this_is_right in ranges[1:]:\n \n ## if segment is the same type, just combine\n if (last_is_right and this_is_right) or (not last_is_right and not this_is_right):\n last_right = this_right\n continue\n \n ## write if not consecutive and there is a small gap\n if this_left - last_right > self.min_segment_size:\n if last_is_right:\n new_right_ranges.append((last_left, last_right))\n else:\n new_left_ranges.append((last_left, last_right))\n \n last_left, last_right, last_is_right = this_left, this_right, this_is_right\n continue\n \n ## if current segment is too small: add to current type\n if (this_right - this_left) < self.min_segment_size:\n last_right = this_right\n continue\n \n ## if previous segment is too small to be output: add to next type\n if (last_right - last_left) < self.min_segment_size:\n last_right = this_right\n last_is_right = this_is_right\n continue\n \n ## otherwise: output\n if last_is_right:\n new_right_ranges.append((last_left, last_right))\n else:\n new_left_ranges.append((last_left, last_right))\n \n last_left, last_right, last_is_right = this_left, this_right, this_is_right \n \n if last_is_right:\n new_right_ranges.append((last_left, last_right))\n else:\n new_left_ranges.append((last_left, last_right))\n \n self.debug( \"ranges=%s\" % str(ranges), 4 )\n self.debug( \"new_left_ranges=%s\" % str(new_left_ranges), 4)\n self.debug( \"new_right_ranges=%s\" % str(new_right_ranges), 4 )\n \n return new_left_ranges, new_right_ranges" ]
[ "0.75773865", "0.6600593", "0.6574509", "0.6569135", "0.63968265", "0.63313943", "0.6322615", "0.624788", "0.6216611", "0.6146239", "0.6133331", "0.60140103", "0.60021335", "0.59763306", "0.5970033", "0.58648163", "0.58545077", "0.57922995", "0.57907534", "0.57814354", "0.57276565", "0.57117796", "0.570119", "0.5663055", "0.5637245", "0.5615576", "0.5611965", "0.5564089", "0.55608976", "0.5557894" ]
0.6790975
1
Read CSV in folder "general" in database. Also used in setup.py
def open_general(file, setup=False): try: if setup is False: p = datapath(True, 'general', file) df = _pd.read_csv(p + '.csv') elif setup is True: p = datapath(True, 'general', file) df = _pd.read_csv(p + '.py') else: df = None # not tested here return df except FileNotFoundError as e: print("There is no record of {} in your database. Go to your chosen setup path to check, if not there go to " "Github and download the missing sheet".format(file)) return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_csv():", "def getFake(directory=\"../FakeRealNews/Data\"):\r\n return pd.read_csv(directory + \"/Fake.csv\")", "def read_csv_file(self):\n pass", "def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)", "def getReal(directory=\"../FakeRealNews/Data\"):\r\n return pd.read_csv(directory + \"/True.csv\")", "def from_csv_to_database():\r\n for year, path in FileNamePath.items():\r\n # load csv files\r\n with open(path, encoding='cp1251') as dataset:\r\n print(f\"Download {year} data\")\r\n get_curr_data(dataset, year)", "def read_csv():\n global csvdata\n global CONFIG\n if type(csvdata) == type(None):\n if not os.path.exists(CONFIG[\"csvfile\"]):\n csvdata = pandas.read_csv(CONFIG[\"csvrepo\"],\n na_values=[\"-999999\",\"NOT AVAILABLE\"])\n os.makedirs(CONFIG[\"cachedir\"],exist_ok=True)\n csvdata.to_csv(CONFIG[\"csvfile\"])\n else:\n csvdata = pandas.read_csv(CONFIG[\"csvfile\"])\n return csvdata", "def get_data(self, csv_file):\n pass", "def load_csv(self):\n self.database = pd.read_csv(\n self.settings['database_path'],\n encoding='utf-8')", "def dataLoad():\n try:\n try: #Python3\n f = open(__file__ + \".csv\",\"rt\")\n except: #Python2\n f = open(__file__ + \".csv\",\"rb\")\n data = f.read().split(',')\n entryCol.entry0.delete(0,END)\n entryCol.entry0.insert(0,data[0])\n entryCol.entry1.delete(0,END)\n entryCol.entry1.insert(0,data[1])\n entryCol.entry2.delete(0,END)\n entryCol.entry2.insert(0,data[2])\n entryCol.entry3.delete(0,END)\n entryCol.entry3.insert(0,data[3])\n botWind.writeN(\"DataLoad: File\")\n except:\n botWind.writeN(\"DataLoad: Default\")", "def read_csv_file(dir_name, csv_file, collection, error_list):\n count = 0\n try:\n filename = os.path.join(dir_name, csv_file)\n with open(filename, 'r') as file:\n csv_reader = csv.DictReader(file)\n # create the document for products collection\n for row in csv_reader:\n collection.insert_one(row)\n except FileNotFoundError:\n LOGGER.info('FileNotFoundError')\n count += 1\n except Exception as error:\n count += 1\n LOGGER.info('Exception:')\n LOGGER.info(error)\n error_list.append(count)", "def loadCSV(input_file):", "def test_findCSV(self,\n filename=\"page-views.csv\",\n input_folder='../../input/raw-data/'):\n\n csv_file = retrive_csv_file(filename, input_folder)\n expected_output = input_folder + filename\n self.assertEqual(csv_file, expected_output)", "def read(self, database ='project'):\n\t\tfile = open(self.file_name, \"r\")\n\n\t\ti = 1\n\t\tseptics = []\n\t\tfor line in file:\n\t\t\tif i > 2:\n\t\t\t\tval = line.split()\n\t\t\t\tself.check_cols(val, 13, 'septic')\n\n\t\t\t\tsep = {\n\t\t\t\t\t'name': val[0].lower(),\n\t\t\t\t\t'q_rate': val[1],\n\t\t\t\t\t'bod': val[2],\n\t\t\t\t\t'tss': val[3],\n\t\t\t\t\t'nh4_n': val[4],\n\t\t\t\t\t'no3_n': val[5],\n\t\t\t\t\t'no2_n': val[6],\n\t\t\t\t\t'org_n': val[7],\n\t\t\t\t\t'min_p': val[8],\n\t\t\t\t\t'org_p': val[9],\n\t\t\t\t\t'fcoli': val[10],\n\t\t\t\t\t'description': val[12] if val[12] != 'null' else None # 12 index because extra column\n\t\t\t\t}\n\t\t\t\tseptics.append(sep)\n\t\t\ti += 1\n\n\t\tif database == 'project':\n\t\t\tdb_lib.bulk_insert(project_base.db, project_parmdb.Septic_sep, septics)\n\t\telse:\n\t\t\tdb_lib.bulk_insert(datasets_base.db, datasets_parmdb.Septic_sep, septics)", "def importAll():\n csvFile = openCsv()\n items = [] # chooseKey, count, grade, keyType, 
mainCategory, mainKey,\n # name, pricePerOne, subCategory, subKey, totalTradeCount,\n # mainLabel, subLabel, description\n\n with open(csvFile) as i:\n readItem = csv.reader(i)\n itemRow = next(readItem)\n for row in readItem:\n items.append(row)\n\n return items", "def get_raw_data():\n data_files = []\n for i, f in enumerate(os.listdir(config.RAW_DATA_DIR)):\n data_files.append(f)\n print i, \": \", f\n while True:\n try:\n index = int(raw_input(\"Type the index of the data file you'd like to import: \"))\n fn_raw_data = data_files[int(index)]\n break\n except ValueError:\n print(\"Not a valid index. Try again.\")\n except IndexError:\n print(\"Not a valid index. Try again.\")\n print \"Importing %s...\" % fn_raw_data\n with open(config.RAW_DATA_DIR + fn_raw_data) as infile:\n next(infile)\n raw_data = list(csv.DictReader(infile))\n return (fn_raw_data, raw_data)", "def update_csv():\n return os.listdir('./data')", "def import_csv_data(cr, registry):\n files = ['data/sc.info.csv']\n for file in files:\n tools.convert_file(cr, 'prospects_app', file, None,\n mode='init', noupdate=True, kind='init')", "def load_file_data_from_db(sip, base_path):\n my_entry = FSEntries(sip)\n md_object = add_collection_name(my_entry.md_info, base_path)\n return md_object", "def import_data_handler():\n\n result = ''\n try:\n current_dir = os.getcwd()\n directory_name = current_dir + '\\\\lesson5\\\\data\\\\'\n file_name_dict = {'products': 'products.csv', 'customers': 'customers.csv',\n 'rentals': 'rentals.csv'}\n for key, value in file_name_dict.items():\n tmp_file = directory_name + value\n mongo_insert = ImportUnitTestData()\n result = mongo_insert.import_data(key, tmp_file)\n print(result)\n except FileNotFoundError as e:\n logger.error('exception %s', e, exc_info=True)\n result = 'exception {}'.format(e)\n print(result)\n return result", "def load_cfda(fullpath):\n try:\n with open(fullpath, errors='backslashreplace') as csvfile:\n\n reader = csv.DictReader(csvfile, delimiter=',', quotechar='\"', skipinitialspace='true')\n for row in reader:\n cfda_program, created = CFDAProgram.objects.get_or_create(\n program_number=row['Program Number'])\n\n cfda_program.data_source = \"USA\"\n cfda_program.program_title = row['Program Title']\n cfda_program.popular_name = row['Popular Name (020)']\n cfda_program.federal_agency = row['Federal Agency (030)']\n cfda_program.authorization = row['Authorization (040)']\n cfda_program.objectives = row['Objectives (050)']\n cfda_program.types_of_assistance = row['Types of Assistance (060)']\n cfda_program.uses_and_use_restrictions = row['Uses and Use Restrictions (070)']\n cfda_program.applicant_eligibility = row['Applicant Eligibility (081)']\n cfda_program.beneficiary_eligibility = row['Beneficiary Eligibility (082)']\n cfda_program.credentials_documentation = row['Credentials/Documentation (083)']\n cfda_program.pre_application_coordination = row['Preapplication Coordination (091)']\n cfda_program.application_procedures = row['Application Procedures (092)']\n cfda_program.award_procedure = row['Award Procedure (093)']\n cfda_program.deadlines = row['Deadlines (094)']\n cfda_program.range_of_approval_disapproval_time = row['Range of Approval/Disapproval Time (095)']\n cfda_program.appeals = row['Appeals (096)']\n cfda_program.renewals = row['Renewals (097)']\n cfda_program.formula_and_matching_requirements = row['Formula and Matching Requirements (101)']\n cfda_program.length_and_time_phasing_of_assistance = row['Length and Time Phasing of Assistance (102)']\n 
cfda_program.reports = row['Reports (111)']\n cfda_program.audits = row['Audits (112)']\n cfda_program.records = row['Records (113)']\n cfda_program.account_identification = row['Account Identification (121)']\n cfda_program.obligations = row['Obligations (122)']\n cfda_program.range_and_average_of_financial_assistance = row['Range and Average of Financial Assistance (123)']\n cfda_program.program_accomplishments = row['Program Accomplishments (130)']\n cfda_program.regulations_guidelines_and_literature = row['Regulations, Guidelines, and Literature (140)']\n cfda_program.regional_or_local_office = row['Regional or Local Office (151) ']\n cfda_program.headquarters_office = row['Headquarters Office (152)']\n cfda_program.website_address = row['Website Address (153)']\n cfda_program.related_programs = row['Related Programs (160)']\n cfda_program.examples_of_funded_projects = row['Examples of Funded Projects (170)']\n cfda_program.criteria_for_selecting_proposals = row['Criteria for Selecting Proposals (180)']\n cfda_program.url = row['URL']\n cfda_program.recovery = row['Recovery']\n cfda_program.omb_agency_code = row['OMB Agency Code']\n cfda_program.omb_bureau_code = row['OMB Bureau Code']\n if row['Published Date']:\n cfda_program.published_date = datetime.strptime(row['Published Date'], '%b, %d %Y')\n if row['Archived Date']:\n cfda_program.archived_date = datetime.strptime(row['Archived Date'], '%b, %d %Y')\n\n cfda_program.save()\n\n # self.logger.log(20, \"loaded %s %s \", cfda_program.program_number, cfda_program)\n\n except IOError:\n logger = logging.getLogger('console')\n logger.log(\"Could not open file to load from\")", "def read_files(folder):\n print_header(\"READING FILES FROM FOLDER (RECURSIVE)\", \"=\")\n files = []\n for dirpath, dirnames, filenames in os.walk(folder):\n if not dirpath.endswith(\"updates\"):\n for filename in filenames:\n root, ext = os.path.splitext(filename)\n if ext.lower() == \".sql\":\n full_path = os.path.join(dirpath, filename)\n with open(full_path, \"r\") as f:\n sql = f.read()\n sql = sql.decode(\"latin-1\")\n\n files.append((filename, sql))\n return files", "def import_data_handler():\n\n result = ''\n try:\n current_dir = os.getcwd()\n directory_name = current_dir + '\\\\' + 'data' + '\\\\'\n file_name_dict = {'products': 'products.csv', 'customers': 'customers.csv',\n 'rentals': 'rentals.csv'}\n for key, value in file_name_dict.items():\n tmp_file = directory_name + value\n mongo_insert = mdb.ImportData(key, tmp_file)\n result = mongo_insert.import_data()\n print(result)\n except FileNotFoundError as e:\n logger.error('exception %s', e, exc_info=True)\n result = 'exception {}'.format(e)\n print(result)\n return result", "def _load_csv(root_path, table_meta):\n relative_path = os.path.join(root_path, table_meta['path'])\n dtypes = _read_csv_dtypes(table_meta)\n\n data = pd.read_csv(relative_path, dtype=dtypes)\n data = _parse_dtypes(data, table_meta)\n\n return data", "def test_misc_csv_read():\n r = csv_reader(\"../test/test.csv\")\n fields = r.hdr()\n data = str(fields)\n while True:\n row = r.row()\n if not row: break\n data += '\\n' + str(row)\n\n assert(data == \"\"\"\n['EVT_CODE*', 'EVT_DATE.DE', 'CODE', 'AGE', 'FRST', 'LST', 'SPEC', 'de.id']\n['tea', '2018/01/01', 'X', '35', 'PRE', 'WHO', 'BUG', '1']\n['coffee', '2018/05/05', 'X', '35', 'JAN,Z', 'WHO', 'FRG', '1']\n['water', '2018/01/01', 'Y', '35', 'TAN', 'POST', 'CAT', '2']\n \"\"\".strip())", "def import_data(directory_name, product_file, customer_file, rentals_file):\n customer = 
DATABASE['customer']\n product = DATABASE['product']\n rental = DATABASE['rental']\n\n counts = (\n import_csv_to_mongodb(product, f\"{directory_name}/{product_file}\"),\n import_csv_to_mongodb(customer, f\"{directory_name}/{customer_file}\"),\n import_csv_to_mongodb(rental, f\"{directory_name}/{rentals_file}\")\n )\n return counts", "def read_dataset_files(datasetid, clean_folder):\n fnotu = datasetid + '.otu_table.clean.feather'\n fnmeta = datasetid + '.metadata.clean.feather'\n\n df = feather.read_dataframe(os.path.join(clean_folder, fnotu))\n # Feather format does not support index names, first column has index\n df.index = df.iloc[:,0]\n df = df.iloc[:, 1:]\n\n meta = feather.read_dataframe(os.path.join(clean_folder, fnmeta))\n meta.index = meta.iloc[:, 0]\n meta = meta.iloc[:, 1:]\n\n ## Make sure sample names are strings\n if df.index.dtype != 'O':\n df.index = pd.read_csv(os.path.join(clean_folder, fnotu), sep='\\t', dtype=str).iloc[:,0]\n\n if meta.index.dtype != 'O':\n meta.index = pd.read_csv(os.path.join(clean_folder, fnmeta), sep='\\t', dtype=str).iloc[:,0]\n\n return df, meta", "def readInCSV(csvFile):\n\tprint \"Checking if helper app is installed...\"\n\tandroidCheckAndInstallHelper()\n\ttry:\n\t\tprint \"Will read in the files from %s\" % csvFile\n\t\tstatus = subprocess.call([\"adb\",\"shell\",\"am\",\"startservice\",\n\t\t\t\t\t\t\t\t \"-a\", \"com.synchronoss.androidDev.contactcreaterapp.action.IMPORT\",\n\t\t\t\t\t\t\t\t \"-e\", \"CSV\", csvFile,\n\t\t\t\t\t\t\t\t \"com.synchronoss.androidDev.contactcreaterapp/.CreateAndAddContacts\"],\n\t\t\t\t\t\t\t\t stdout=stdout,stderr=stderr)\n\t\tif (status == 1):\n\t\t\tprint \"Contacts successfully copied from csv on target device.\"\n\t\tif (status != 0):\n\t\t\tprint >>sys.stderr, \"Unable to launch contact adder app\"\n\t\t\tsys.exit()\n\texcept OSError as e:\n\t\tprint >>sys.stderr, \"Execution failed: \", e\n\t\tsys.exit()\n\twaitForHelperApp()", "def _collect(self, conll_directory) -> Iterator[Any]:\n logging.info(\"Reading .conll from %s\", conll_directory)\n return dataset_path_iterator(conll_directory, self.configs.file_ext)", "def main(csvfile, dbfile, verbose=False):\n CONN = sqlite3.connect(dbfile)\n cursor = CONN.cursor()\n create_schema(cursor)\n process_data(cursor, csvfile, verbose=verbose)\n CONN.commit()\n CONN.close()" ]
[ "0.6300106", "0.61802393", "0.6075419", "0.60727006", "0.606177", "0.6019453", "0.5933561", "0.58593744", "0.5730161", "0.5652817", "0.56373644", "0.5618824", "0.55967456", "0.5585875", "0.55683404", "0.5567126", "0.5550129", "0.5536807", "0.55361545", "0.55278426", "0.5509099", "0.55055", "0.54797417", "0.54628044", "0.5454694", "0.5448278", "0.5446893", "0.5443432", "0.5435057", "0.5431615" ]
0.65410346
0
Determines whether the discrepancy has been sufficiently resolved; used as return value for fix_discrepancy.
def discrepancy_resolved(self): # If there's a discrepancy and distance change matches the existing data, we're good. if self.distance_change == self.existing_data: return True # If recommend_updates, i.e., if self.distance_change == self.new_data, we'll update the data and we're good elif self.recommend_updates: return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_solved(self):\n if not self._find_empty():\n return True\n else:\n return False", "def is_solved(self):\n\n marker = self._marker\n amount_of_pegs = 0\n for row in marker:\n for i in row:\n if i == \"*\":\n amount_of_pegs += 1\n return amount_of_pegs == 1", "def is_solved(self):\n marker = self._marker\n\n count = 0\n for row in marker:\n for piece in row:\n if piece == \"*\":\n count += 1\n if count == 1:\n return True\n else:\n return False", "def is_solved(self):\n peg_count = 0\n for row in self._marker:\n for item in row:\n if item == '*':\n peg_count += 1\n return peg_count == 1", "def check_initial_confidence(self): # pragma: no cover\n if self.test_type != 'perf':\n return True\n\n if self.required_initial_confidence is None:\n return True # pragma: no cover\n\n # TODO(robertocn): Remove all uses of \"confidence\".\n if self.dummy_initial_confidence is not None:\n self.initial_confidence = float(\n self.dummy_initial_confidence)\n if (float(self.initial_confidence) <\n float(self.required_initial_confidence)):\n self._set_insufficient_confidence_warning()\n return False\n return True\n\n if self.dummy_builds:\n dummy_result = self.good_rev.values != self.bad_rev.values\n if not dummy_result:\n self._set_insufficient_confidence_warning()\n return dummy_result\n\n with self.api.m.step.nest('Re-testing reference range'):\n expiration_time = time.time() + REGRESSION_CHECK_TIMEOUT\n while time.time() < expiration_time:\n if len(self.good_rev.values) >= 5 and len(self.bad_rev.values) >= 5:\n if self.significantly_different(self.good_rev.values,\n self.bad_rev.values):\n return True\n if len(self.good_rev.values) == len(self.bad_rev.values):\n revision_to_retest = self.last_tested_revision\n else:\n revision_to_retest = min(self.good_rev, self.bad_rev,\n key=lambda x: len(x.values))\n if len(revision_to_retest.values) < MAX_REQUIRED_SAMPLES:\n revision_to_retest.retest()\n else:\n break\n self._set_insufficient_confidence_warning()\n return False", "def is_solved(self):\n i = 0\n for row in self._marker:\n for x in row:\n if x == \"*\":\n i += 1\n if i > 1:\n return False\n return True", "def has_conflict(self):\n for diffstat in self.diffstat():\n if diffstat.has_conflict:\n return True\n return False", "def is_equivalence(self) -> bool:", "def discrepancy(self):\n result = 0\n for focal, value in self.items():\n if focal.cardinal > 0:\n result -= value * math.log(self.betP(focal), 2)\n return round(result, 6)", "def is_solvable(self):\n for row, col in np.ndindex(9, 9):\n if len(self.possible_values[row][col]) < 1 and self.final_values[row][col] == 0:\n return False\n return True", "def is_solved(self):\n return self._start == self._target", "def is_inequality(self):\n return True", "def is_inequality(self):\n return True", "def sanity_check(self):\n res = True\n res = res and self.detected\n res = res and np.sum(self.diffs) < 30000 # experimental value\n return res", "def _has_needs_correcting(self, dframe):\n return (dframe.loc[dframe.sync_status == int(ConsentSyncStatus.NEEDS_CORRECTING)].shape[0] > 0)", "def is_solved(self):\n self.solved = self.current_pos == self.finish_pos\n return self.solved", "def is_inequality(self):\n return False", "def is_deficient(n):\r\n if sum_proper_divisors(n) < n:\r\n return True\r\n else:\r\n return False", "def is_concealed(self) -> bool:\n # return not self._exposed\n return sum(self.concealed_part.values()) == 13", "def is_inequality(self): \n return False", "def did_solve(self):\n return self._solution[\"status\"] == \"optimal\"", "def 
is_deficient_number(x):\n return sum(proper_divisors(x)) < x", "def did_solve(self) -> bool:\n return self._stats[\"success\"]", "def is_solved(self):\n return (self.from_grid == self.to_grid)", "def is_solved(self):\n return self._from_word == self._to_word", "def is_correctness_available_for_response(self, response):\n return True", "def isInconsistent(self, problemname : str) -> bool:\n return problemname in self.inconsistentset", "def is_solved(self):\n return self.from_grid == self.to_grid", "def is_solved(self):\n return self.from_grid == self.to_grid", "def is_solved(self):\n return self.from_grid == self.to_grid" ]
[ "0.6593381", "0.622655", "0.62037015", "0.6201302", "0.6191842", "0.61888754", "0.60700476", "0.6044539", "0.6014487", "0.6009676", "0.5961862", "0.5950892", "0.5950892", "0.59451425", "0.5910703", "0.58750445", "0.5857207", "0.582537", "0.5819653", "0.580809", "0.5805597", "0.5798865", "0.5796407", "0.5755686", "0.57459867", "0.5712142", "0.5700134", "0.5697857", "0.5697857", "0.5697857" ]
0.7586293
0
Run when the palette is closed
def on_palette_close(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def notify(self, args):\r\n try:\r\n self.cmd_object_.on_palette_close()\r\n\r\n except:\r\n app = adsk.core.Application.cast(adsk.core.Application.get())\r\n ui = app.userInterface\r\n ui.messageBox('Failed During Palette Close:\\n{}'.format(traceback.format_exc()))", "def on_stop(self):\r\n app = adsk.core.Application.cast(adsk.core.Application.get())\r\n ui = app.userInterface\r\n palette = ui.palettes.itemById(self.palette_id)\r\n\r\n for handler in self.html_handlers:\r\n palette.incomingFromHTML.remove(handler)\r\n\r\n if palette:\r\n palette.deleteMe()\r\n\r\n super().on_stop()", "def _on_close(self):\n self.shell_obj.closed()", "def cleanup(self) -> None:\n colorama.deinit()", "def __window_close(self):\n pass", "def __onclosing(self):\n self.window.destroy()", "def on_closing(self, *args):\n pass", "def on_cleanup(self):\n\n pygame.quit()", "def __on_close(self):\n # Release the resource and\n # close the windows\n LOGGER.info(\"closing...\")\n self.__quit.set()\n self.__detect.end()\n self.root.quit()", "def finalizeExit(self) -> None:\n base.graphicsEngine.removeAllWindows()\n if self.win is not None:\n print(\"Exiting KarelCraft app, bye!\")\n self.closeWindow(self.win)\n self.win = None\n self.destroy()\n sys.exit()", "def end(self, event):\n plt.close()", "def _finish(self):\n steppable_registry = CompuCellSetup.persistent_globals.steppable_registry\n steppable_registry.finish()\n self.close_frames()", "def on_close(self, event):\n # Save pos and size\n x, y = self.GetPosition()\n width, height = self.GetSize()\n self.__config.set('window.x', x)\n self.__config.set('window.y', y)\n self.__config.set('window.width', width)\n self.__config.set('window.height', height)\n\n # Style\n style = self.GetWindowStyle()\n self.__config.set('window.style', style)\n\n self.__config.save()\n\n # Stop monitoring\n self.__cor.stop_monitor()\n\n # Kill graph as it seems to be stopping script from ending\n self.__graph = None\n\n # End\n event.Skip()", "def on_palette_execute(self, palette: adsk.core.Palette):\r\n pass", "def onCloseWindow(self, event):\r\n\r\n self.Destroy()", "def close(self, event, data):\n try:\n with open(self.save_file, \"w+\") as save_file:\n try:\n data = json.load(save_file)\n except ValueError:\n data = dict()\n data[\"color\"] = rgb_to_hex(self.rgb_color)\n json.dump(data, save_file)\n except (OSError, json.JSONDecodeError):\n print(\"Error when trying to set save file.\")\n Gtk.main_quit()", "def close_preferences(self,event):\n self.Destroy()\n event.Skip()", "def on_cleanup(self):\n self.close()", "def on_cleanup(self):\n self.close()", "def handle_close(self):\n self.active = False\n self.close()", "def cb_close(self, *args):\n Gtk.main_quit()", "def done(self):\n if self.pbar is not None:\n self.pbar.close()\n self.pbar = None\n self.counter = 0", "def onClose (self):\n \n pass", "def _shutdown(): \n for GD in GlobalDictionary._instances:\n print(\"\\nCleaning up:\", GD.name)\n GD._handler.close()\n del GD\n\n print(\"Shutting down\")\n \n sys.exit(0)", "def onQuit(self, event):\n\n\t\tself.onClose(None)", "def finalize(self):\n self.thread.quit()\n self.color.release()\n self.pos.release()\n\n if self.initCoordinates.f_timer is not None:\n for f_timer in self.initCoordinates.f_timer:\n self.timer.addFunctionTimer(f_timer)\n if self.numMethod.f_timer is not None:\n for f_timer in self.numMethod.f_timer:\n self.timer.addFunctionTimer(f_timer)", "def onClose(self, *args):\n rospy.loginfo('Closing Cloud Map')\n self.root.quit()\n self.root.destroy()\n # 
rospy.signal_shutdown('Exited UI')", "def cog_unload(self):\n self._get_sketch_prompt.cancel()", "def onClose(self, event): \n \n self.Destroy()\n return", "def on_unload(self):\n pass" ]
[ "0.767233", "0.6960615", "0.67901963", "0.6604122", "0.6592666", "0.64696324", "0.64392585", "0.6405782", "0.6362652", "0.6352815", "0.6348606", "0.6346555", "0.63247025", "0.6313968", "0.6289701", "0.6276552", "0.6267763", "0.62607265", "0.62607265", "0.6249281", "0.623151", "0.62173563", "0.62118787", "0.6207149", "0.62064016", "0.6189255", "0.61469436", "0.60886616", "0.60793936", "0.60749465" ]
0.91420245
0
Function is run when the palette is executed. Useful to gather initial data and send to html page
def on_palette_execute(self, palette: adsk.core.Palette): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def notify(self, args):\r\n app = adsk.core.Application.cast(adsk.core.Application.get())\r\n ui = app.userInterface\r\n try:\r\n\r\n # Create and display the palette.\r\n palette = ui.palettes.itemById(self.cmd_object_.palette_id)\r\n\r\n if not palette:\r\n palette = ui.palettes.add(\r\n self.cmd_object_.palette_id,\r\n self.cmd_object_.palette_name,\r\n self.cmd_object_.palette_html_file_url,\r\n self.cmd_object_.palette_is_visible,\r\n self.cmd_object_.palette_show_close_button,\r\n self.cmd_object_.palette_is_resizable,\r\n self.cmd_object_.palette_width,\r\n self.cmd_object_.palette_height,\r\n True\r\n )\r\n\r\n # Add handler to HTMLEvent of the palette.\r\n on_html_event_handler = _HTMLEventHandler(self.cmd_object_)\r\n palette.incomingFromHTML.add(on_html_event_handler)\r\n self.cmd_object_.handlers.append(on_html_event_handler)\r\n self.cmd_object_.html_handlers.append(on_html_event_handler)\r\n\r\n # Add handler to CloseEvent of the palette.\r\n on_closed_handler = _PaletteCloseHandler(self.cmd_object_)\r\n palette.closed.add(on_closed_handler)\r\n self.cmd_object_.handlers.append(on_closed_handler)\r\n\r\n else:\r\n main_url = urlparse(self.cmd_object_.palette_html_file_url)\r\n current_url = urlparse(palette.htmlFileURL)\r\n\r\n if not (\r\n (not self.cmd_object_.palette_force_url_reload) &\r\n (main_url.netloc == current_url.netloc) &\r\n (main_url.path == current_url.path)\r\n ):\r\n # ui.messageBox(current_url.netloc + \" vs. \" + main_url.netloc)\r\n # ui.messageBox(current_url.path + \" vs. \" + main_url.path)\r\n # ui.messageBox(str(self.cmd_object_.palette_force_url_reload))\r\n palette.htmlFileURL = self.cmd_object_.palette_html_file_url\r\n\r\n palette.isVisible = True\r\n\r\n self.cmd_object_.on_palette_execute(palette)\r\n\r\n except:\r\n ui.messageBox('Palette ({}) Execution Failed: {}'.format(\r\n self.cmd_object_.palette_html_file_url,\r\n traceback.format_exc())\r\n )", "def open(self):\n print('palette c_edge heat50 .6')\n print('palette c_vertex heat50 .9')\n print('palette c_sus_range heat10 .6')\n print('palette c_sus heat10 .9')\n print('palette c_inf_range heat85 .6')\n print('palette c_inf heat85 .9')\n print('palette c_wait_sus_range heat30 .6')\n print('palette c_wait_sus heat30 .9')", "def _on_palette_change(self, palette_data: dict) -> None:\n # set the color from the metadata\n color = self._label_to_rgb[palette_data['label']]\n # if the selected color is different, queue a cursor update\n if not np.array_equal(self._color, color):\n self.is_cursor_change = True\n # store the color with the new value\n self._color[:] = color\n # set the is brush flag\n self.is_brush = palette_data['paint'] == 'brush'\n # store the brush size with the new value\n self.brush_size = palette_data['brush_size']\n # if the palette is in super pixel mode, get that data\n if palette_data['paint'] == 'super_pixel':\n # get the algorithm from the dictionary\n algorithm = palette_data['super_pixel']\n # get the arguments for the specific algorithm\n arguments = palette_data[algorithm]\n # get the segments using the given algorithm and arguments\n segs = segment(self._image, algorithm, **arguments)\n # apply the segmented image pixels and segments to local structures\n self._super_pixel_segments[:], self._super_pixel[:] = segs\n # otherwise set the super pixel data back to 0\n else:\n self._super_pixel_segments[:] = 0\n self._super_pixel[:] = 0", "def post_start(self):", "def prepare_UI(self):", "def run(self):\n self.print_welcome()\n self.handle_inputs()", "def 
on_window_ready(self):\n pass", "def postRun(self):\n pass", "def do_startup(self):\n \n import json\n\n GLib.set_application_name(\"Deity\")\n Gtk.Application.do_startup(self)\n \n settings = self.get_settings()\n\n menub = Gtk.MenuButton(name=\"input-menu_button\",\n use_popover=True)\n\n headerbar = Gtk.HeaderBar(name=\"input-headerbar\",\n show_close_button=True,\n title=\"Deity\")\n\n main_grid = Gtk.Grid(name=\"input-main_grid\")\n\n statusbar = Gtk.Box(name=\"input-statusbar\",\n orientation=0,\n spacing=2)\n statusbar.pack_start(self.statuslabel, 1, 1, 1)\n\n self.connector.connect(\"query-status\", self.show_output)\n self.connector.connect(\"query-waiting\",\n lambda wid, count: self.statuslabel.set_text(\n f\"Queries on hold : {count}\"))\n self.connector.connect(\"request\", print)\n\n headerbar.pack_end(menub)\n\n main_grid.attach(self.iogrid.get_widget(), 0, 0, 1, 1)\n main_grid.attach(statusbar, 0, 1, 1, 1)\n\n self.output_window.add(self.get_placeholder_image())\n\n self.window.set_titlebar(headerbar)\n self.window.set_default_icon_from_file(\"artwork/Logo.png\")\n self.window.add(main_grid)\n\n self.window.connect(\"key-press-event\", self.parse_keypress)\n self.window.connect(\"delete-event\", self.request_quit)\n \n self.other[\"connector\"] = self.connector\n self.other[\"headerbar\"] = headerbar\n self.other[\"history\"] = self.history\n self.other[\"input-window\"] = self.window\n self.other[\"iogrid\"] = self.iogrid\n self.other[\"plugins\"] = self.get_plugins(settings[\"enabled-plugins\"])\n self.other[\"statusbar\"] = statusbar\n self.other[\"statuslabel\"] = self.statuslabel\n self.other[\"output-notebook\"] = self.notebook\n self.other[\"output-window\"] = self.output_window\n self.other[\"main-grid\"] = main_grid\n self.other[\"menu_button\"] = menub\n \n self.apply_settings(settings)\n self.current_prompt = self.iogrid.add_prompt()\n\n self.window.set_application(self)\n self.output_window.set_application(self)\n\n self.output_window.move(800, 150)\n self.window.move(75, 160)", "def setup(self):\n header_print(self.data['intro'])\n header_print(self.data['help'])\n random.shuffle(self.data['draw'])\n random.shuffle(self.data['locations'])\n random.shuffle(self.data['events'])\n random.shuffle(self.data['aces'])\n random.shuffle(self.data['personalities'])\n self.stats = {\n 'round': 0,\n 'powers': {\n 'MOONS': 6,\n 'SUNS': 6,\n 'WAVES': 6,\n 'LEAVES': 6,\n 'WYRMS': 6,\n 'KNOTS': 6,\n },\n 'hand': self.data['draw'][:],\n 'discard': [],\n 'active': [],\n 'opponent': {},\n }", "def script(self):", "def on_load_theme (self):\n\n\t\tif self.has_started:\n\t\t\tself.init_buffers()\n\t\t\tself.redraw_background()\n\t\t\tself.redraw_foreground()", "def setup_page(self):\r\n raise NotImplementedError", "def setup_page(self):\n raise NotImplementedError", "def start_button_action(self):\n if self.dynamic.output_file.text() and os.path.isdir(\n self.dynamic.output_directory.text()\n ):\n\n additional_settings = {\n \"Save_data\": True,\n \"Filepath\": self.dynamic.output_directory.text(),\n \"Filename\": self.dynamic.output_file.text(),\n \"skip_init\": False,\n }\n\n # Generate a Lookuptable for the plots\n steps = (\n int(\n abs(\n float(self.dynamic.max_voltage_IV.value())\n / float(self.dynamic.voltage_steps_IV.value())\n )\n )\n + 1\n )\n self.cmapLookup = self.cmap.getLookupTable(1.0, 3.0, steps)\n self.variables.reset_plot_data()\n\n self.generate_dynamicwaiting_job(additional_settings)\n # self.variables.reset_plot_data()\n\n else:\n reply = 
QMessageBox.information(\n None,\n \"Warning\",\n \"Please enter a valid filepath and filename.\",\n QMessageBox.Ok,\n )", "def start_displayhook(self):\n pass", "def on_render(self, console):\n super().on_render(console)\n x, y = self.engine.mouse_location\n console.tiles_rgb['bg'][x, y] = color.white\n console.tiles_rgb['fg'][x, y] = color.black", "def view(self):\n\t\tself.done(1)", "def initGui(self):\n\n icon_path = ':/plugins/EU_Mapper/EUICON.png'\n self.add_action(\n icon_path,\n text=self.tr(u'Generate EU Map'),\n callback=self.run,\n parent=self.iface.mainWindow())\n\n # will be set False in run()\n self.first_start = True", "def start(self):\n self.show_greeting()\n self.read_frame()", "def _initialise_run(self) -> None:", "def postloop(self):\n print 'Bye!'", "def on_palette_close(self):\r\n pass", "def _populate_output(self):\n pass", "def state_preview_do(cfg, app, win, events):", "def populating_popup(self, *args):\n return _ida_hexrays.Hexrays_Hooks_populating_popup(self, *args)", "def do_stuff(self):\n self.create_tourism_raster()", "def initializePage(self):\n WC.WizardPage.initialize(self)\n self.page.use(qt.QVBoxLayout())\n exp = self.give_field(\"exp-store\").give_exp(\"pressure\")\n grps = exp.find_groups(self.give_field(\"mesh\"))\n dims = [(u\"Pressure\", 1.)]\n tit = u\"Adding pressure on meshes groups\"\n # The last groups should be seen first\n grps.reverse()\n WC.add_condition_selector(self, grps, dims, \"pressure-loading*\", tit)", "def on_startup(self) -> None:\n ...", "def render(self):\n self.env.render()\n #input(\"Press enter to take a step \")" ]
[ "0.71663606", "0.5976665", "0.59089667", "0.5755218", "0.57544327", "0.56373245", "0.5611845", "0.56111276", "0.5609256", "0.55880344", "0.5586924", "0.5578858", "0.5556275", "0.5524903", "0.55076844", "0.5467774", "0.54665", "0.5456171", "0.543333", "0.543", "0.5428835", "0.541232", "0.53966635", "0.53932476", "0.5389214", "0.5388145", "0.5382311", "0.5368325", "0.5366644", "0.5354036" ]
0.6666663
1
Builds the selection spec.
def build_selection_spec(client_factory, name): sel_spec = client_factory.create('ns0:SelectionSpec') sel_spec.name = name return sel_spec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_selection ( self ,\n tag , \n algotype ,\n inputs , \n *args ,\n **kwargs ) :\n sel_tag = '%s_Selection' % tag\n sel_name = 'Sel%sFor%s' % ( tag , self.name() )\n #\n ## check existing selection\n #\n sel = self._selection ( sel_tag )\n if sel : return sel \n\n #\n ## adjust a bit the arguments\n if not kwargs.has_key('Preambulo') :\n kwargs ['Preambulo' ] = self['Preambulo']\n\n if not kwargs.has_key( 'ParticleCombiners' ) :\n kwargs ['ParticleCombiners'] = { '' : 'LoKi::VertexFitter:PUBLIC' } \n \n # \n ## use \"simple-selection\"\n #\n from PhysSelPython.Wrappers import SimpleSelection\n sel = SimpleSelection (\n sel_name ,\n algotype ,\n inputs , \n *args ,\n **kwargs )\n # \n return self._add_selection( sel_tag , sel )", "def makeSelection(self, selection=\"\"):\n\n\t\tif selection == \"\":\n\t\t\tprint \"usage: makeSelection(selection)\"\n\n\t\tsel_string = self.parseMacros(selection)\n\n\t\t# --- split by \";\" --- #\n\t\ttmp = []\n\t\tcols = []\n\t\tcols = sel_string.split(\";\")\n\t\tfor col in cols:\n\t\t\tinverse = False\n\t\t\tif col == \"\":\n\t\t\t\tcontinue\n\n\t\t\ttmp = string.split(col, \"=\")\n\t\t\tif \"!\" in tmp[0]:\n\t\t\t\tinverse = True\n\n\t\t\tif \"resi\" in tmp[0]:\n\t\t\t\tself.parseResI(tmp[1])\n\t\t\t\tself.invresi = inverse\n\t\t\telif \"resn\" in tmp[0]:\n\t\t\t\tself.parseResN(tmp[1])\n\t\t\t\tself.invresn = inverse\n\t\t\telif \"name\" in tmp[0]:\n\t\t\t\tself.parseAtom(tmp[1])\n\t\t\t\tself.invatom = inverse\n\t\t\telif \"element\" in tmp[0]:\n\t\t\t\tself.parseElement(tmp[1])\n\t\t\t\tself.invelement = inverse\t\n\t\t\telif \"chain\" in tmp[0]:\n\t\t\t\tself.parseChain(tmp[1])\n\t\t\t\tself.invchain = inverse\n\t\t\telif \"type\" in tmp[0]:\n\t\t\t\tself.parseType(tmp[1])\n\t\t\t\tself.invtype = inverse\n\t\t\telif \"cat\" in tmp[0]:\n\t\t\t\tself.parseCat(tmp[1])\n\t\t\t\tself.invcat = inverse\n\t\t\telif \"atomid\" in tmp[0]:\n\t\t\t\tself.parseAtomid(tmp[1])\n\t\t\t\tself.invatomid = inverse\n\t\t\telif \"BB\" in tmp[0]:\n\t\t\t\tself.parseAtom(\" N , CA , C , O \")\n\t\t\t\tself.invatom = False\n\t\t\telif \"CEN\" in tmp[0]:\n\t\t\t\tself.parseAtom(\" N , CA , C , O , CB \")\n\t\t\t\tself.invatom = False\n\t\t\telif \"SC\" in tmp[0]:\n\t\t\t\tself.parseAtom(\" N , CA , C , O \")\n\t\t\t\tself.invatom = True\n\t\t\telif \"HET\" in tmp[0]:\n\t\t\t\tself.parseType(\"HETATM\")\n\t\t\t\tself.invtype = inverse\n\t\t\telse:\n\t\t\t\tprint \"unrecognized selector: \",tmp[0]\n\t\t\t\tsys.exit()", "def build_sel_opt(self, this_param, sel_blob):\n for sel in sel_blob:\n this_sel = etree.SubElement(this_param, 'option', selected=sel['selected'], value=sel['value'])\n this_sel.text = sel['value']", "def create_select(qualifier, lines, select_id=None):\n options = {} #{ option : [Label]}\n for label in lines.keys():\n option = qualifier(label)\n if (option not in options):\n options[option] = []\n options[option].append(label)\n option_list = list(options.keys())\n option_list.sort()\n print '<select class=\"lines\"',\n if select_id is not None:\n print 'id=%s' % qa(select_id)\n print 'multiple=\"true\" size=\"10\" onchange=\"updateSvg();\">'\n for option in option_list:\n print '<option value=' + qa('[' + \n reduce(lambda x,y:x+json.dumps(str(y))+',',options[option],\"\")[0:-1]\n + ']') + '>'+qe(option)+'</option>'\n print '</select>'", "def __make_sel(selection):\n sel = []\n param = []\n for key, value in selection.iteritems(): \n if key == \"fn\":\n if value.find('%') >= 0:\n sel.append(\"irods_filepath like %s\")\n else:\n 
sel.append(\"irods_filepath = %s\")\n elif key == \"expid\":\n sel.append(\"exper_id = %s\".format(value))\n elif key == 'runnum':\n sel.append(\"runnum = %s\".format(value))\n elif key == 'status' and value:\n sel.append(\"status = %s\")\n else:\n continue\n param.append(value)\n\n q = \"WHERE {}\".format(\" AND \".join(sel)) if sel else \"\"\n return q, param", "def layout_selection(self):\n select_txt = wx.StaticText(self, -1, 'Selection Options')\n select_txt.SetForegroundColour('blue')\n self.selection_cbox = wx.ComboBox(self, -1, style=wx.CB_READONLY)\n list_of_options = ['Select all Data',\n 'Unselect all Data',\n 'Select all Data 1D',\n 'Unselect all Data 1D',\n 'Select all Data 2D',\n 'Unselect all Data 2D']\n for option in list_of_options:\n self.selection_cbox.Append(str(option))\n self.selection_cbox.SetValue('Select all Data')\n wx.EVT_COMBOBOX(self.selection_cbox, -1, self._on_selection_type)\n self.sizer5.AddMany([(select_txt, 0, wx.ALL, 5),\n (self.selection_cbox, 0, wx.ALL, 5)])\n self.enable_selection()", "def _build_conditional(self):\n # HERE\n self.output_sect = etree.SubElement(self.inputs, 'conditional', name='output_opt')\n self.output_sect_sel = etree.SubElement(self.output_sect, 'param', name='output_opt_sel', type='select', label='Additional output parameters?')\n self.opt_yes = etree.SubElement(self.output_sect_sel, 'option', value='yes')\n self.opt_yes.text = 'yes'\n self.opt_no = etree.SubElement(self.output_sect_sel, 'option', value='no', selected='true')\n self.opt_no.text = 'no'\n self.when_yes = etree.SubElement(self.output_sect, 'when', value='yes')", "def build_inputs_out_sel(self, params, parent):\n for param in params:\n new_name = param['name'] + '_sel'\n new_label = param['name'].replace('_', ' ').title()\n new_arg = '--' + new_name\n this_param = etree.SubElement(parent, 'param', name=new_name, argument=new_arg, type='boolean',\n truevalue=new_arg, falsevalue='', optional='true', checked='false',\n label=new_label, help=param['help'])", "def build(buffer,load_default=None):\n assert buffer is not None\n assert isinstance(buffer.lines,list)\n\n # compute 'load_default' flag\n # (maybe this block can be removed, as it seems we always set load_default to non-None value (to be confirmed))\n if load_default is None:\n if buffer.filename==sdconst.SELECTION_FROM_CMDLINE:\n load_default=False # don't load default files if selection is from command args\n else:\n load_default=True # load default files if selection is from stdin or file\n\n # create buffer selection\n selection=Selection()\n\n # store outer attributes ('outer' means attributes not stored in selection file)\n selection.filename=buffer.filename\n selection.path=buffer.path\n\n # store inner attributes ('inner' means attributes stored in selection file)\n parse_buffer(buffer.lines,selection)\n\n # merge some outer attributes with inner attributes (else they are not returned by merge_facets() method)\n process_parameter(\"selection_filename=%s\"%selection.filename,selection)\n process_parameter(\"selection_file=%s\"%selection.path,selection)\n\n # load default (file containing default parameters for all projects)\n default_selection=load_default_file(sdconfig.default_selection_file,load_default)\n\n\n # NOTE\n #\n # For project level default file to be loaded, the following must be true\n #\n # \"project must be specified in cli parameter or in the selection file or \n # in the global default file\" or inside an identifier (e.g. 
dataset_functional_id)\n\n\n # retrieve projects\n projects=get_projects(selection)\n\n # retrieve default projects\n default_projects=get_default_projects(default_selection)\n\n # \n if len(projects)==0:\n # project not present in CLI nor in 'selection file'\n\n # let's try if project is set in default file\n if len(default_projects)==0:\n project=None # no project set\n elif len(default_projects)==1:\n project=default_projects[0]\n elif len(default_projects)>1:\n print_too_many_projects_set_warning(default_projects)\n project=None # too many project: do not load project level default value\n\n elif len(projects)>0:\n\n # When project(s) are present in CLI or 'selection file',\n # project(s) from default file are ignored.\n # (see #34 for more info)\n if len(default_projects)>0:\n del default_selection.facets['project']\n \n\n if len(projects)==1:\n project=projects[0]\n elif len(projects)>1:\n print_too_many_projects_set_warning(projects)\n project=None # too many project: do not load project level default value\n\n\n # load project default (file containing default parameters for the project)\n project_default_selection=load_default_file(sdconfig.get_project_default_selection_file(project),load_default)\n\n project_default_selection.childs.append(selection) # add selection as child of project_default_selection\n selection.parent=project_default_selection # set project_default_selection as parent of selection\n\n default_selection.childs.append(project_default_selection) # add project_default_selection as child of default_selection\n project_default_selection.parent=default_selection # set default_selection as parent of project_default_selection\n\n return selection", "def __init__(self):\n \n # Call the super contructor\n super(Select, self).__init__(0, 0, 200, 50)\n\n # Assign personalisation attributes\n self.placeholder = \"Choose a value\"\n self.font_name: str = None\n self.font_size: int = 12\n\n self.bg_color: tuple = (0, 0, 0, 0)\n self.bg_hover: tuple = (255, 255, 255, 30)\n self.bg_press: tuple = None\n\n self.label_color: tuple = (255, 255, 255, 255)\n self.label_hover: tuple = None\n self.label_press: tuple = None\n\n self.border_color: tuple = (255, 255, 255, 255)\n self.border_hover: tuple = None\n self.border_press: tuple = None\n self.border_width: int = 4\n\n self.option_height: int = 45\n self.option_margin: int = 5\n self.option_font_name = None\n self.option_font_size = 12\n\n self.option_bg_color: tuple = (0, 0, 0, 0)\n self.option_bg_hover: tuple = (255, 255, 255, 30)\n self.option_bg_press: tuple = None\n self.option_bg_select: tuple = (255, 255, 255, 60)\n\n self.option_label_color: tuple = (255, 255, 255, 255)\n self.option_label_hover: tuple = None\n self.option_label_press: tuple = None\n self.option_label_select: tuple = None\n\n # Assign internal attributes\n self._is_hovered: bool = False\n self._is_pressed: bool = False\n self._is_opened: bool = False\n self._is_inverted: bool = False\n self._options: list = list()\n self._current_select: int = -1\n self._current_hover: int = -1\n\n self._bg = pyglet.shapes.Rectangle\n self._label = pyglet.text.Label\n self._border = Border\n\n self._option_border: Border = None\n self._options_bg: list = list()\n self._options_label: list = list()", "def build_subsets(self):\n self.all = nrn.SectionList()\n self.all.wholetree(sec=self.soma)", "def get_selection(self, selection_name, format=None):", "def sel_prep(self):\n sel_blob = []\n for sel in self.blob['options']:\n if self.blob['defaultValue'] == sel['name']:\n 
sel_blob.append({'value': sel['name'], 'selected': 'true'})\n else:\n sel_blob.append({'value': sel['name'], 'selected': 'false'})\n\n return sel_blob", "def build(self) -> None:", "def build(self):\n raise NotImplementedError(\"This should have been implemented.\")", "def _get_selection_params(self):\n # lazy imports to avoid hard dependency\n from tsfresh.defaults import (\n FDR_LEVEL,\n HYPOTHESES_INDEPENDENT,\n TEST_FOR_BINARY_TARGET_BINARY_FEATURE,\n TEST_FOR_BINARY_TARGET_REAL_FEATURE,\n TEST_FOR_REAL_TARGET_BINARY_FEATURE,\n TEST_FOR_REAL_TARGET_REAL_FEATURE,\n )\n\n # Set defaults\n selection_params = {\n \"test_for_binary_target_binary_feature\": TEST_FOR_BINARY_TARGET_BINARY_FEATURE, # noqa: E501\n \"test_for_binary_target_real_feature\": TEST_FOR_BINARY_TARGET_REAL_FEATURE,\n \"test_for_real_target_binary_feature\": TEST_FOR_REAL_TARGET_BINARY_FEATURE,\n \"test_for_real_target_real_feature\": TEST_FOR_REAL_TARGET_REAL_FEATURE,\n \"fdr_level\": FDR_LEVEL,\n \"hypotheses_independent\": HYPOTHESES_INDEPENDENT,\n }\n\n # Replace defaults with user defined parameters\n for name in selection_params.keys():\n value = getattr(self, name)\n if value is not None:\n selection_params[name] = value\n\n return selection_params", "def _build(self, **kwargs):", "def _build(self):", "def _build(self):", "def add_selector(self, listing):\n # We will be able to select X-frames and its boundaries\n # will be stored in the given list\n\n def onselect(xmin, xmax):\n# indmin, indmax = np.searchsorted(x, (xmin, xmax))\n# indmax = min(len(x)-1, indmax)\n indmin = xmin\n indmax = xmax\n onselect.listing.append([indmin, indmax])\n print (onselect.listing)\n \n onselect.listing = listing\n \n # set useblit True on gtkagg for enhanced performance\n ax = self.axes\n span = SpanSelector(ax, onselect, 'horizontal', useblit=True,\n rectprops=dict(alpha=0.5, facecolor='red') )\n \n self.widget_list.append(span)", "def build_options(self, identifier: Optional[str]) -> BuildOptions:\n\n with self.reader.identifier(identifier):\n before_all = self.reader.get(\"before-all\", sep=\" && \")\n\n build_frontend_str = self.reader.get(\"build-frontend\", env_plat=False)\n environment_config = self.reader.get(\n \"environment\", table={\"item\": '{k}=\"{v}\"', \"sep\": \" \"}\n )\n environment_pass = self.reader.get(\"environment-pass\", sep=\" \").split()\n before_build = self.reader.get(\"before-build\", sep=\" && \")\n repair_command = self.reader.get(\"repair-wheel-command\", sep=\" && \")\n\n dependency_versions = self.reader.get(\"dependency-versions\")\n test_command = self.reader.get(\"test-command\", sep=\" && \")\n before_test = self.reader.get(\"before-test\", sep=\" && \")\n test_requires = self.reader.get(\"test-requires\", sep=\" \").split()\n test_extras = self.reader.get(\"test-extras\", sep=\",\")\n build_verbosity_str = self.reader.get(\"build-verbosity\")\n\n build_frontend: BuildFrontend\n if build_frontend_str == \"build\":\n build_frontend = \"build\"\n elif build_frontend_str == \"pip\":\n build_frontend = \"pip\"\n else:\n msg = f\"cibuildwheel: Unrecognised build frontend '{build_frontend_str}', only 'pip' and 'build' are supported\"\n print(msg, file=sys.stderr)\n sys.exit(2)\n\n try:\n environment = parse_environment(environment_config)\n except (EnvironmentParseError, ValueError):\n print(\n f'cibuildwheel: Malformed environment option \"{environment_config}\"',\n file=sys.stderr,\n )\n traceback.print_exc(None, sys.stderr)\n sys.exit(2)\n\n # Pass through environment variables\n if 
self.platform == \"linux\":\n for env_var_name in environment_pass:\n try:\n environment.add(env_var_name, os.environ[env_var_name])\n except KeyError:\n pass\n\n if dependency_versions == \"pinned\":\n dependency_constraints: Optional[\n DependencyConstraints\n ] = DependencyConstraints.with_defaults()\n elif dependency_versions == \"latest\":\n dependency_constraints = None\n else:\n dependency_versions_path = Path(dependency_versions)\n dependency_constraints = DependencyConstraints(dependency_versions_path)\n\n if test_extras:\n test_extras = f\"[{test_extras}]\"\n\n try:\n build_verbosity = min(3, max(-3, int(build_verbosity_str)))\n except ValueError:\n build_verbosity = 0\n\n manylinux_images: Dict[str, str] = {}\n musllinux_images: Dict[str, str] = {}\n if self.platform == \"linux\":\n all_pinned_docker_images = _get_pinned_docker_images()\n\n for build_platform in MANYLINUX_ARCHS:\n pinned_images = all_pinned_docker_images[build_platform]\n\n config_value = self.reader.get(\n f\"manylinux-{build_platform}-image\", ignore_empty=True\n )\n\n if not config_value:\n # default to manylinux2014\n image = pinned_images.get(\"manylinux2014\")\n elif config_value in pinned_images:\n image = pinned_images[config_value]\n else:\n image = config_value\n\n assert image is not None\n manylinux_images[build_platform] = image\n\n for build_platform in MUSLLINUX_ARCHS:\n pinned_images = all_pinned_docker_images[build_platform]\n\n config_value = self.reader.get(f\"musllinux-{build_platform}-image\")\n\n if config_value is None:\n image = pinned_images[\"musllinux_1_1\"]\n elif config_value in pinned_images:\n image = pinned_images[config_value]\n else:\n image = config_value\n\n musllinux_images[build_platform] = image\n\n return BuildOptions(\n globals=self.globals,\n test_command=test_command,\n test_requires=test_requires,\n test_extras=test_extras,\n before_test=before_test,\n before_build=before_build,\n before_all=before_all,\n build_verbosity=build_verbosity,\n repair_command=repair_command,\n environment=environment,\n dependency_constraints=dependency_constraints,\n manylinux_images=manylinux_images or None,\n musllinux_images=musllinux_images or None,\n build_frontend=build_frontend,\n )", "def build(self):", "def build(self):", "def build(self):", "def build (self):\n raise NotImplementedError", "def _create_features_dropdown(self, name=_features_dropdown):\n fts = sorted(self.features)\n d = Select(options=fts, css_classes=[self._features_dropdown], name=name)\n return d", "def build(self):\n raise NotImplementedError", "def build():", "def build(self):\n with self.set_master(sticky=\"nsew\", row_weights=[1], column_weights=[0, 1], auto_columns=0):\n self.build_category_canvas()\n with self.set_master(sticky=\"nsew\", row_weights=[0, 1, 0], column_weights=[1, 1]):\n self.build_previous_range_button(row=0, column=0)\n self.build_hidden_fields_checkbutton(row=0, column=1)\n with self.set_master(sticky=\"nsew\", row=1, column=0, row_weights=[1], column_weights=[1]):\n self.build_entry_frame()\n with self.set_master(sticky=\"nsew\", row=1, column=1, row_weights=[1], column_weights=[1]):\n self.build_field_frame()\n self.build_next_range_button(row=2, column=0)", "def build(self, spec, prefix):\n make()" ]
[ "0.6188935", "0.59599125", "0.5944895", "0.55326456", "0.5366692", "0.5328713", "0.53118664", "0.53038955", "0.5270639", "0.5265871", "0.5262976", "0.5192852", "0.518418", "0.51645607", "0.51583874", "0.5150827", "0.5110556", "0.5098783", "0.5098783", "0.50898474", "0.50657874", "0.504695", "0.504695", "0.504695", "0.50376", "0.5031194", "0.5024653", "0.5015964", "0.5003452", "0.49741095" ]
0.72015435
0
Builds the traversal spec object.
def build_traversal_spec(client_factory, name, spec_type, path, skip, select_set): traversal_spec = client_factory.create('ns0:TraversalSpec') traversal_spec.name = name traversal_spec.type = spec_type traversal_spec.path = path traversal_spec.skip = skip traversal_spec.selectSet = select_set return traversal_spec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_recursive_traversal_spec(client_factory):\r\n visit_folders_select_spec = build_selection_spec(client_factory,\r\n \"visitFolders\")\r\n # For getting to hostFolder from datacenter\r\n dc_to_hf = build_traversal_spec(client_factory, \"dc_to_hf\", \"Datacenter\",\r\n \"hostFolder\", False,\r\n [visit_folders_select_spec])\r\n # For getting to vmFolder from datacenter\r\n dc_to_vmf = build_traversal_spec(client_factory, \"dc_to_vmf\", \"Datacenter\",\r\n \"vmFolder\", False,\r\n [visit_folders_select_spec])\r\n # For getting Host System to virtual machine\r\n h_to_vm = build_traversal_spec(client_factory, \"h_to_vm\", \"HostSystem\",\r\n \"vm\", False,\r\n [visit_folders_select_spec])\r\n\r\n # For getting to Host System from Compute Resource\r\n cr_to_h = build_traversal_spec(client_factory, \"cr_to_h\",\r\n \"ComputeResource\", \"host\", False, [])\r\n\r\n # For getting to datastore from Compute Resource\r\n cr_to_ds = build_traversal_spec(client_factory, \"cr_to_ds\",\r\n \"ComputeResource\", \"datastore\", False, [])\r\n\r\n rp_to_rp_select_spec = build_selection_spec(client_factory, \"rp_to_rp\")\r\n rp_to_vm_select_spec = build_selection_spec(client_factory, \"rp_to_vm\")\r\n # For getting to resource pool from Compute Resource\r\n cr_to_rp = build_traversal_spec(client_factory, \"cr_to_rp\",\r\n \"ComputeResource\", \"resourcePool\", False,\r\n [rp_to_rp_select_spec, rp_to_vm_select_spec])\r\n\r\n # For getting to child res pool from the parent res pool\r\n rp_to_rp = build_traversal_spec(client_factory, \"rp_to_rp\", \"ResourcePool\",\r\n \"resourcePool\", False,\r\n [rp_to_rp_select_spec, rp_to_vm_select_spec])\r\n\r\n # For getting to Virtual Machine from the Resource Pool\r\n rp_to_vm = build_traversal_spec(client_factory, \"rp_to_vm\", \"ResourcePool\",\r\n \"vm\", False,\r\n [rp_to_rp_select_spec, rp_to_vm_select_spec])\r\n\r\n # Get the assorted traversal spec which takes care of the objects to\r\n # be searched for from the root folder\r\n traversal_spec = build_traversal_spec(client_factory, \"visitFolders\",\r\n \"Folder\", \"childEntity\", False,\r\n [visit_folders_select_spec, dc_to_hf,\r\n dc_to_vmf, cr_to_ds, cr_to_h, cr_to_rp,\r\n rp_to_rp, h_to_vm, rp_to_vm])\r\n return traversal_spec", "def build_object_spec(client_factory, root_folder, traversal_specs):\r\n object_spec = client_factory.create('ns0:ObjectSpec')\r\n object_spec.obj = root_folder\r\n object_spec.skip = False\r\n object_spec.selectSet = traversal_specs\r\n return object_spec", "def generate_specs_build(self):\n from django_swagger_utils.drf_server.generators.swagger_generator import SwaggerGenerator\n\n swagger_gen = SwaggerGenerator(self.parser, self.paths, self.app_name)\n # generating request_response files\n swagger_gen.generate_request_response()\n # testing properties\n swagger_gen.generate_definitions()\n # generating global parameters\n swagger_gen.generate_parameters()\n # generating global response\n swagger_gen.generate_responses()\n # generating urls\n swagger_gen.generate_urls()", "def build(self, spec, prefix):\n make()", "def _build(self, prefilt=None):\n self.make_filiation()\n if prefilt is not None:\n self.prefilter(filt=prefilt)\n self.make_trees()\n return", "def build_tree(self, prefix, depth):\n for count, function in [[self.n_files, self.make_file],\n [self.n_children, self.make_child_recurse],\n [self.n_symlinks, self.make_symlink]]:\n for i in range(count):\n if not self.can_continue():\n return\n name = os.path.join(prefix, self.name_gen.next())\n function(name, 
depth)", "def build(self):\n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.walker_layer = DeepWalker(self.args, self.vocab_size, self.degrees)\n self.regularizer_layer = Regularization(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.walker_layer()+self.regularizer_layer(self.walker_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n\n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n\n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss,\n global_step=self.batch)\n\n self.init = tf.global_variables_initializer()\n\n self.weights = overlap_generator(self.args, self.graph)", "def build(self):\n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.walker_layer = DeepWalker(self.args, self.vocab_size, self.degrees)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.walker_layer()\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n\n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n\n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss,\n global_step=self.batch)\n\n self.init = tf.global_variables_initializer()", "def build(self):\n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.walker_layer = DeepWalker(self.args, self.vocab_size, self.degrees)\n self.cluster_layer = Clustering(self.args)\n self.regularizer_layer = Regularization(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.walker_layer()\n self.loss = self.loss + self.gamma*self.cluster_layer(self.walker_layer)\n self.loss = self.loss + self.regularizer_layer(self.walker_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n\n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n\n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss,\n global_step=self.batch)\n\n self.init = tf.global_variables_initializer()\n\n self.weights = overlap_generator(self.args, self.graph)", "def build_rspec(cls, slice_urn, resources, leases, flowspace, vms, rspec_version='GENI 3 request'):\n rspec = RSpec(version=rspec_version)\n\n nodes = []\n channels = []\n links = []\n\n # XXX Here it is only about mappings and hooks between ontologies\n\n for urn in resources:\n # XXX TO BE CORRECTED, this handles None values\n if not urn:\n continue\n resource = dict()\n # TODO: take into account the case where we send a dict of URNs without keys\n #resource['component_id'] = resource.pop('urn')\n resource['component_id'] = urn\n resource_hrn, resource_type = urn_to_hrn(urn) # resource['component_id'])\n # build component_manager_id\n top_auth = resource_hrn.split('.')[0]\n cm = urn.split(\"+\")\n resource['component_manager_id'] = \"%s+%s+authority+cm\" % (cm[0],top_auth)\n\n #print \"resource_type\", resource_type\n if resource_type == 'node':\n #print \"NODE\", resource, cls\n resource = cls.on_build_resource_hook(resource)\n nodes.append(resource)\n elif resource_type == 'link':\n links.append(resource)\n elif resource_type 
== 'channel':\n channels.append(resource)\n else:\n raise Exception, \"Not supported type of resource\" \n\n #for node in nodes:\n # print \"NODE:\", node\n\n rspec.version.add_nodes(nodes, rspec_content_type=\"request\")\n\n #rspec.version.add_links(links)\n #rspec.version.add_channels(channels)\n\n sfa_leases = cls.manifold_to_sfa_leases(leases, slice_urn)\n #print \"sfa_leases\", sfa_leases\n if sfa_leases:\n # SFAWRAP BUG ???\n # rspec.version.add_leases bugs with an empty set of leases\n # slice_id = leases[0]['slice_id']\n # TypeError: list indices must be integers, not str\n rspec.version.add_leases(sfa_leases, []) # XXX Empty channels for now\n \n return rspec.toxml()", "def _build_impl(self):", "def _build(self):", "def _build(self):", "def build(root):", "def path_builder(self) -> CaseEvent.TestPathBuilder:\n return self._path_builder", "def build(self) -> None:", "def build_tree(v, pop_obs_spec, stat_vars, vertical_idx):\n\n # vertical as the root\n root = {\n 'sv': ['top'],\n 'l': text_format.format_title(v),\n 't': 'p',\n 'c': 0, # count of child nodes\n 'cd': [],\n 'sv_set': set(), # used for counting child nodes\n }\n # specs with 0 constaints are of type \"value\",\n # as the level 1 cd of root\n for pos in pop_obs_spec[0]:\n ui_node = util.UiNode(pos, {}, False)\n childStatsVars = []\n # find all the statsvars belong to the node\n for sv in stat_vars[pos.key]:\n if pos.cpv == sv.pv:\n childStatsVars.append(sv.dcid)\n root['c'] += 1\n if len(childStatsVars) > 0:\n root['cd'].append({\n 'populationType': ui_node.pop_type,\n 'sv': childStatsVars,\n 'l': text_format.format_title(ui_node.text),\n 't': 'v',\n 'c': len(childStatsVars),\n 'mprop': ui_node.mprop,\n })\n\n # build specs with >= 1 constraints recursively\n for pos in pop_obs_spec[1]:\n child = build_tree_recursive(pos, 1, pop_obs_spec, stat_vars,\n )\n # For certain branch, we would like to put them under 0 pv nodes:\n if (pos.pop_type in ['EarthquakeEvent', 'CycloneEvent',\n 'MortalityEvent']):\n for pv0 in root['cd']:\n # hoist logic will break if multiple 0 pv\n if (pv0['populationType'] == pos.pop_type and pv0['mprop'] == 'count'):\n if 'cd' not in pv0:\n pv0['cd'] = []\n pv0['cd'].append(child)\n if 'sv_set' not in pv0:\n pv0['sv_set'] = set()\n pv0['sv_set'] |= child['sv_set']\n break\n else:\n root['cd'].append(child)\n root['sv_set'] |= child['sv_set']\n del child['sv_set']\n\n # update the count\n for pv0 in root['cd']:\n if 'sv_set' in pv0:\n pv0['c'] += len(pv0['sv_set'])\n del pv0['sv_set']\n root['c'] += len(root['sv_set'])\n del root['sv_set']\n statsvar_path = {}\n return traverseTree(root, [vertical_idx], statsvar_path)", "def build(_):", "def build():", "def build(self):\n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.walker_layer = DeepWalker(self.args, self.vocab_size, self.degrees)\n self.cluster_layer = Clustering(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.walker_layer()+self.gamma*self.cluster_layer(self.walker_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n\n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n\n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss,\n global_step=self.batch)\n\n self.init = tf.global_variables_initializer()", "def build(self):\n raise NotImplementedError(\"This should have been implemented.\")", 
"def startElement(self, name, attrs): # creating the node along the path being tracked\n name = clean_node_name(name)\n p_attrs = process_attrs(attrs)\n\n if name == \"\":\n raise ValueError, \"XML Node name cannot be empty\"\n\n elif name == \"requirement\":\n self.obj_depth.append(requirement_q2class(p_attrs))\n\n elif name == \"prompt\":\n self.obj_depth.append(prompt_q2class(p_attrs))\n\n elif name == \"house\":\n self.obj_depth.append(house_q2class(p_attrs))\n\n elif name == \"visible\":\n self.obj_depth.append(visible_q2class(p_attrs))\n\n elif name == \"computer\":\n self.obj_depth.append(computer_q2class(p_attrs))\n\n elif name == \"special\":\n self.obj_depth.append(special_q2class(p_attrs))\n\n elif name == \"title\":\n self.obj_depth.append(title_q2class(p_attrs))\n\n elif name == \"memories\":\n self.obj_depth.append(memories_q2class(p_attrs))\n\n elif name == \"tip\":\n self.obj_depth.append(tip_q2class(p_attrs))\n\n elif name == \"score\":\n self.obj_depth.append(score_q2class(p_attrs))\n\n elif name == \"exit\":\n self.obj_depth.append(exit_q2class(p_attrs))\n\n elif name == \"inventory\":\n self.obj_depth.append(inventory_q2class(p_attrs))\n\n elif name == \"memory\":\n self.obj_depth.append(memory_q2class(p_attrs))\n\n elif name == \"prereq\":\n self.obj_depth.append(prereq_q2class(p_attrs))\n\n elif name == \"desc\":\n self.obj_depth.append(desc_q2class(p_attrs))\n\n elif name == \"room\":\n self.obj_depth.append(room_q2class(p_attrs))\n\n elif name == \"player\":\n self.obj_depth.append(player_q2class(p_attrs))\n\n elif name == \"l\":\n self.obj_depth.append(l_q2class(p_attrs))\n\n elif name == \"o\":\n self.obj_depth.append(o_q2class(p_attrs))\n\n elif name == \"item\":\n self.obj_depth.append(item_q2class(p_attrs))\n\n elif name == \"intro\":\n self.obj_depth.append(intro_q2class(p_attrs))\n\n elif name == \"t\":\n self.obj_depth.append(t_q2class(p_attrs))\n\n self.char_buffer = []\n self.last_processed = \"start\"", "def build_rspec(cls, slice_urn, resources, leases, flowspace, vms, rspec_version='GENI 3 request'):\n import time\n start_time = None\n end_time = None\n\n # Default duration for WiLab is 2 hours\n duration_default = 120\n for lease in leases:\n if 'end_time' in lease:\n end_time = lease['end_time']\n start_time = lease['start_time']\n break\n\n if start_time is None:\n # start_time = Now\n start_time = time.time()\n\n if end_time is None:\n end_time = int(start_time + duration_default*60)\n #raise Exception, \"end_time is mandatory in leases\"\n\n # duration in seconds from now till end_time\n duration = end_time - start_time\n # duration in minutes\n duration = duration / 60\n duration = int(duration)\n if duration < duration_default:\n duration = duration_default\n Log.tmp(\"start_time = \",start_time)\n Log.tmp(\"end_time = \",end_time)\n Log.tmp(\"duration = \",duration)\n # RSpec will have expires date = now + duration\n rspec = RSpec(version=rspec_version, ttl=duration, expires=end_time)\n\n nodes = []\n channels = []\n links = []\n\n # XXX Here it is only about mappings and hooks between ontologies\n i = 0\n for urn in resources:\n # XXX TO BE CORRECTED, this handles None values\n if not urn:\n continue\n resource = dict()\n # TODO: take into account the case where we send a dict of URNs without keys\n #resource['component_id'] = resource.pop('urn')\n resource['component_id'] = urn\n resource_hrn, resource_type = urn_to_hrn(urn) # resource['component_id'])\n # build component_manager_id\n\n # The only change for WiLab compared to Generic 
SFAWrapParser\n cm = urn.split(\"+\")\n resource['component_manager_id'] = \"%s+%s+authority+cm\" % (cm[0],cm[1])\n\n #print \"resource_type\", resource_type\n if resource_type == 'node':\n #print \"NODE\", resource, cls\n resource['client_id'] = \"PC\" + str(i)\n resource = cls.on_build_resource_hook(resource)\n nodes.append(resource)\n elif resource_type == 'link':\n links.append(resource)\n elif resource_type == 'channel':\n channels.append(resource)\n else:\n raise Exception, \"Not supported type of resource\" \n\n i = i + 1\n #for node in nodes:\n # print \"NODE:\", node\n\n rspec.version.add_nodes(nodes, rspec_content_type=\"request\")\n #rspec.version.add_links(links)\n #rspec.version.add_channels(channels)\n\n #sfa_leases = cls.manifold_to_sfa_leases(leases, slice_urn)\n ##print \"sfa_leases\", sfa_leases\n #if sfa_leases:\n # # SFAWRAP BUG ???\n # # rspec.version.add_leases bugs with an empty set of leases\n # # slice_id = leases[0]['slice_id']\n # # TypeError: list indices must be integers, not str\n # rspec.version.add_leases(sfa_leases, []) # XXX Empty channels for now\n return rspec.toxml()", "def build(self):\n raise NotImplementedError", "def _internal_build(self):\n self.nodes = self.__tree.Nodes()\n self.edges = self.__tree.Edges()\n self.augmentedEdges = {}\n for key, val in self.__tree.AugmentedEdges().items():\n self.augmentedEdges[key] = list(val)\n self.root = self.__tree.Root()\n\n seen = set()\n self.branches = set()\n\n # Find all of the branching nodes in the tree, degree > 1\n # That is, they appear in more than one edge\n for e1, e2 in self.edges:\n if e1 not in seen:\n seen.add(e1)\n else:\n self.branches.add(e1)\n\n if e2 not in seen:\n seen.add(e2)\n else:\n self.branches.add(e2)\n\n # The nodes that are not branches are leaves\n self.leaves = set(self.nodes.keys()) - self.branches\n self.leaves.remove(self.root)", "def build(self): \n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.factorization_layer = Factorization(self.args, self.vocab_size)\n self.regularizer_layer = Regularization(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.factorization_layer()\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n \n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n \n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss, global_step = self.batch)\n \n self.init = tf.global_variables_initializer()\n\n self.weights = overlap_generator(self.args, self.graph)", "def _build_octree(self):\n\n # cleanup old tree\n self._nodes_positions = []\n self._nodes_mass = []\n self._nodes_sizes = []\n self._nodes_children_types = []\n self._nodes_children_ids = []\n\n min_pos = np.min(self._positions)\n max_pos = np.max(self._positions)\n\n self._build_octree_branch(\n bodies=list(range(self.bodies)),\n coords_min=np.array([min_pos] * 3),\n coords_max=np.array([max_pos] * 3)\n )", "def __build_test_model_children_tree_1(self) -> Model:\n self.model_builder.clear()\n\n r_a = SystemFile(\"a\", 1024, True)\n r_aa = SystemFile(\"aa\", 512, False)\n r_a.add_child(r_aa)\n r_ab = SystemFile(\"ab\", 512, False)\n r_a.add_child(r_ab)\n r_b = SystemFile(\"b\", 3090, True)\n r_ba = SystemFile(\"ba\", 2048, True)\n r_b.add_child(r_ba)\n r_baa = SystemFile(\"baa\", 2048, False)\n r_ba.add_child(r_baa)\n r_bb = SystemFile(\"bb\", 42, True) # only 
in remote\n r_b.add_child(r_bb)\n r_bba = SystemFile(\"bba\", 42, False) # only in remote\n r_bb.add_child(r_bba)\n r_bd = SystemFile(\"bd\", 1000, False)\n r_b.add_child(r_bd)\n r_c = SystemFile(\"c\", 1234, False) # only in remote\n r_d = SystemFile(\"d\", 5678, True) # only in remote\n r_da = SystemFile(\"da\", 5678, False) # only in remote\n r_d.add_child(r_da)\n\n l_a = SystemFile(\"a\", 1024, True)\n l_aa = SystemFile(\"aa\", 512, False)\n l_a.add_child(l_aa)\n l_ab = SystemFile(\"ab\", 512, False)\n l_a.add_child(l_ab)\n l_b = SystemFile(\"b\", 1611, True)\n l_ba = SystemFile(\"ba\", 512, True)\n l_b.add_child(l_ba)\n l_baa = SystemFile(\"baa\", 512, False)\n l_ba.add_child(l_baa)\n l_bc = SystemFile(\"bc\", 99, True) # only in local\n l_b.add_child(l_bc)\n l_bca = SystemFile(\"bca\", 99, False) # only in local\n l_bc.add_child(l_bca)\n l_bd = SystemFile(\"bd\", 1000, False)\n l_b.add_child(l_bd)\n\n s_b = LftpJobStatus(0, LftpJobStatus.Type.MIRROR, LftpJobStatus.State.RUNNING, \"b\", \"\")\n s_b.total_transfer_state = LftpJobStatus.TransferState(1611, 3090, 52, 10, 1000)\n s_b.add_active_file_transfer_state(\"ba/baa\", LftpJobStatus.TransferState(512, 2048, 25, 5, 500))\n s_c = LftpJobStatus(0, LftpJobStatus.Type.PGET, LftpJobStatus.State.QUEUED, \"c\", \"\")\n s_d = LftpJobStatus(0, LftpJobStatus.Type.MIRROR, LftpJobStatus.State.QUEUED, \"d\", \"\")\n\n self.model_builder.set_remote_files([r_a, r_b, r_c, r_d])\n self.model_builder.set_local_files([l_a, l_b])\n self.model_builder.set_lftp_statuses([s_b, s_c, s_d])\n return self.model_builder.build_model()", "def build(self): \n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.factorization_layer = Factorization(self.args, self.vocab_size)\n self.regularizer_layer = Regularization(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.factorization_layer()+self.regularizer_layer(self.factorization_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n \n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n \n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss, global_step = self.batch)\n \n self.init = tf.global_variables_initializer()\n\n self.weights = overlap_generator(self.args, self.graph)", "def _buildtree(self):\n self.pricetree = np.zeros((self.steps+1,self.steps+1))\n self.pricetree[0][0] = self.p\n for j in range(self.steps):\n for i in range(j+1):\n self.pricetree[j+1][i+1] = self.pricetree[j][i]*self.down\n self.pricetree[j+1][0] = self.pricetree[j][0]*self.up" ]
[ "0.6779343", "0.64210516", "0.57740533", "0.55809647", "0.5465794", "0.5431072", "0.5333568", "0.52883077", "0.5245698", "0.52180934", "0.5119556", "0.5117688", "0.5117688", "0.5085158", "0.504372", "0.5026567", "0.5005321", "0.4998705", "0.4986079", "0.49831903", "0.49652678", "0.49404657", "0.49230203", "0.49208888", "0.49124372", "0.49109963", "0.4906557", "0.4897252", "0.4896741", "0.48877546" ]
0.70819336
0
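
(Editorial note, not a dataset row:) the `build_traversal_spec` helper in the record above describes a single hop through the vSphere managed-object hierarchy. The lines below are a minimal, hedged sketch of how it is typically called; `client_factory` is assumed to be the suds-style client factory used throughout the record's code, and `build_selection_spec` is the companion helper that appears in the record's negatives.

    # Hedged sketch only: `client_factory` (a suds-style factory) and
    # `build_selection_spec` are assumed from the surrounding record code.
    visit_folders_select_spec = build_selection_spec(client_factory,
                                                     "visitFolders")

    # One traversal hop: from a Datacenter to its vmFolder, continuing
    # recursively through any folders reached via "visitFolders".
    dc_to_vmf = build_traversal_spec(client_factory,
                                     name="dc_to_vmf",
                                     spec_type="Datacenter",
                                     path="vmFolder",
                                     skip=False,
                                     select_set=[visit_folders_select_spec])
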
Builds the Recursive Traversal Spec to traverse the managed object hierarchy.
def build_recursive_traversal_spec(client_factory): visit_folders_select_spec = build_selection_spec(client_factory, "visitFolders") # For getting to hostFolder from datacenter dc_to_hf = build_traversal_spec(client_factory, "dc_to_hf", "Datacenter", "hostFolder", False, [visit_folders_select_spec]) # For getting to vmFolder from datacenter dc_to_vmf = build_traversal_spec(client_factory, "dc_to_vmf", "Datacenter", "vmFolder", False, [visit_folders_select_spec]) # For getting Host System to virtual machine h_to_vm = build_traversal_spec(client_factory, "h_to_vm", "HostSystem", "vm", False, [visit_folders_select_spec]) # For getting to Host System from Compute Resource cr_to_h = build_traversal_spec(client_factory, "cr_to_h", "ComputeResource", "host", False, []) # For getting to datastore from Compute Resource cr_to_ds = build_traversal_spec(client_factory, "cr_to_ds", "ComputeResource", "datastore", False, []) rp_to_rp_select_spec = build_selection_spec(client_factory, "rp_to_rp") rp_to_vm_select_spec = build_selection_spec(client_factory, "rp_to_vm") # For getting to resource pool from Compute Resource cr_to_rp = build_traversal_spec(client_factory, "cr_to_rp", "ComputeResource", "resourcePool", False, [rp_to_rp_select_spec, rp_to_vm_select_spec]) # For getting to child res pool from the parent res pool rp_to_rp = build_traversal_spec(client_factory, "rp_to_rp", "ResourcePool", "resourcePool", False, [rp_to_rp_select_spec, rp_to_vm_select_spec]) # For getting to Virtual Machine from the Resource Pool rp_to_vm = build_traversal_spec(client_factory, "rp_to_vm", "ResourcePool", "vm", False, [rp_to_rp_select_spec, rp_to_vm_select_spec]) # Get the assorted traversal spec which takes care of the objects to # be searched for from the root folder traversal_spec = build_traversal_spec(client_factory, "visitFolders", "Folder", "childEntity", False, [visit_folders_select_spec, dc_to_hf, dc_to_vmf, cr_to_ds, cr_to_h, cr_to_rp, rp_to_rp, h_to_vm, rp_to_vm]) return traversal_spec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_object_spec(client_factory, root_folder, traversal_specs):\r\n object_spec = client_factory.create('ns0:ObjectSpec')\r\n object_spec.obj = root_folder\r\n object_spec.skip = False\r\n object_spec.selectSet = traversal_specs\r\n return object_spec", "def HierarchyIterator(obj):\n while obj:\n yield obj\n for opChild in SplineInputGeneratorHelper.HierarchyIterator(obj.GetDown()):\n yield opChild\n obj = obj.GetNext()", "def recurse(self):\n url = self._api + '?recursive=1'\n json = self._json(self._get(url), 200)\n return Tree(json, self._session) if json else None", "def _build_tree(self, root, obj):\n\n if obj is None:\n return\n\n for attr_name in obj.__class__.__ordered__:\n if attr_name.startswith('_'):\n continue\n\n attr = getattr(obj.__class__, attr_name)\n\n if isinstance(attr, XmlElementProperty):\n element = root.add_child(attr.name)\n self._build_tree(element, getattr(obj, attr_name))\n elif isinstance(attr, XmlAttributeProperty):\n value = getattr(obj, attr_name)\n if value is not None:\n root.add_attribute(attr.name, value)", "def build_tree(self, prefix, depth):\n for count, function in [[self.n_files, self.make_file],\n [self.n_children, self.make_child_recurse],\n [self.n_symlinks, self.make_symlink]]:\n for i in range(count):\n if not self.can_continue():\n return\n name = os.path.join(prefix, self.name_gen.next())\n function(name, depth)", "def _get_object_subtree(self):\n raise NotImplementedError", "def __build_test_model_children_tree_1(self) -> Model:\n self.model_builder.clear()\n\n r_a = SystemFile(\"a\", 1024, True)\n r_aa = SystemFile(\"aa\", 512, False)\n r_a.add_child(r_aa)\n r_ab = SystemFile(\"ab\", 512, False)\n r_a.add_child(r_ab)\n r_b = SystemFile(\"b\", 3090, True)\n r_ba = SystemFile(\"ba\", 2048, True)\n r_b.add_child(r_ba)\n r_baa = SystemFile(\"baa\", 2048, False)\n r_ba.add_child(r_baa)\n r_bb = SystemFile(\"bb\", 42, True) # only in remote\n r_b.add_child(r_bb)\n r_bba = SystemFile(\"bba\", 42, False) # only in remote\n r_bb.add_child(r_bba)\n r_bd = SystemFile(\"bd\", 1000, False)\n r_b.add_child(r_bd)\n r_c = SystemFile(\"c\", 1234, False) # only in remote\n r_d = SystemFile(\"d\", 5678, True) # only in remote\n r_da = SystemFile(\"da\", 5678, False) # only in remote\n r_d.add_child(r_da)\n\n l_a = SystemFile(\"a\", 1024, True)\n l_aa = SystemFile(\"aa\", 512, False)\n l_a.add_child(l_aa)\n l_ab = SystemFile(\"ab\", 512, False)\n l_a.add_child(l_ab)\n l_b = SystemFile(\"b\", 1611, True)\n l_ba = SystemFile(\"ba\", 512, True)\n l_b.add_child(l_ba)\n l_baa = SystemFile(\"baa\", 512, False)\n l_ba.add_child(l_baa)\n l_bc = SystemFile(\"bc\", 99, True) # only in local\n l_b.add_child(l_bc)\n l_bca = SystemFile(\"bca\", 99, False) # only in local\n l_bc.add_child(l_bca)\n l_bd = SystemFile(\"bd\", 1000, False)\n l_b.add_child(l_bd)\n\n s_b = LftpJobStatus(0, LftpJobStatus.Type.MIRROR, LftpJobStatus.State.RUNNING, \"b\", \"\")\n s_b.total_transfer_state = LftpJobStatus.TransferState(1611, 3090, 52, 10, 1000)\n s_b.add_active_file_transfer_state(\"ba/baa\", LftpJobStatus.TransferState(512, 2048, 25, 5, 500))\n s_c = LftpJobStatus(0, LftpJobStatus.Type.PGET, LftpJobStatus.State.QUEUED, \"c\", \"\")\n s_d = LftpJobStatus(0, LftpJobStatus.Type.MIRROR, LftpJobStatus.State.QUEUED, \"d\", \"\")\n\n self.model_builder.set_remote_files([r_a, r_b, r_c, r_d])\n self.model_builder.set_local_files([l_a, l_b])\n self.model_builder.set_lftp_statuses([s_b, s_c, s_d])\n return self.model_builder.build_model()", "def work_tree(obj, **kwargs):\n max_depth = 
0\n exclusions = kwargs.get('exclusions', {\"groups\": [], \"classes\": [], \"params\": []})\n groups_done = {}\n classes = {\"depths\": {}, \"content\": {}}\n params = {\"depths\": {}, \"content\": {}}\n if hasattr(obj, 'hostname') and not hasattr(obj, 'name'):\n obj.name = obj.hostname\n to_index = [(obj, 1)]\n\n while to_index:\n (obj, depth) = to_index.pop()\n if obj.name in groups_done and groups_done[obj.name] <= depth:\n continue\n\n objclasses = obj.classes.exclude(classname__in=exclusions['classes'])\n updated_classes = update_values(objclasses, \"classname\", \"classparams\", depth=depth, results=classes)\n\n objparams = obj.parameters.exclude(paramkey__in=exclusions['params'])\n updated_params = update_values(objparams, \"paramkey\", \"paramvalue\", depth=depth, results=params)\n\n if not updated_classes or not updated_params:\n return (\"Fail\", \"Fail\")\n\n groups_done[obj.name] = depth\n depth += 1\n for group in obj.groups.exclude(name__in=exclusions['groups']):\n to_index.append((group, depth))\n if max_depth < depth:\n max_depth = depth\n\n params[\"content\"]['max_depth'] = max_depth\n params[\"content\"]['done_count'] = len(groups_done)\n return (classes[\"content\"], params[\"content\"])", "def _fetchObjectChildren(self, obj, obj_path):\n obj_children = []\n path_strings = []\n tree_items = []\n\n is_attr_list = [False] * len(obj_children)\n\n # Object attributes\n # Needed to handle errors while getting object's attributes\n # Related with spyder-ide/spyder#6728 and spyder-ide/spyder#9959\n for attr_name in dir(obj):\n try:\n attr_value = getattr(obj, attr_name)\n obj_children.append((attr_name, attr_value))\n path_strings.append('{}.{}'.format(obj_path, attr_name)\n if obj_path else attr_name)\n is_attr_list.append(True)\n except Exception:\n # Attribute could not be get\n pass\n assert len(obj_children) == len(path_strings), \"sanity check\"\n\n for item, path_str, is_attr in zip(obj_children, path_strings,\n is_attr_list):\n name, child_obj = item\n tree_items.append(TreeItem(child_obj, name, path_str, is_attr))\n\n return tree_items", "def _traverse_tree(self):\n if not self.children:\n yield self\n for child in self.children:\n yield from child._traverse_tree()", "def make_drs_tree(self):\n pass", "def work_tree2(obj, **kwargs):\n if 'exclusions' in kwargs:\n exclusions = kwargs['exclusions']\n else:\n exclusions = Exclusions([], [], [])\n #groups_done = {}\n classes = NodeResults(nodetype='classes')\n params = NodeResults(nodetype='params')\n if hasattr(obj, 'hostname') and not hasattr(obj, 'name'):\n obj.name = obj.hostname\n to_index = [(obj, 1)]\n\n # loop opts\n index_pop = to_index.pop\n index_extend = to_index.extend\n egroups, eclasses, eparams = exclusions\n add_classes = classes.add_entries\n add_params = params.add_entries\n\n while to_index:\n (obj, depth) = index_pop()\n #objname = obj.name\n #if objname in groups_done and groups_done[objname] <= depth:\n #continue\n try:\n objclasses = obj.classes.exclude(classname__in=eclasses)\n add_classes(objclasses, \"classname\", \"classparams\", depth)\n objparams = obj.parameters.exclude(paramkey__in=eparams)\n add_params(objparams, \"paramkey\", \"paramvalue\", depth)\n except RuntimeError, e:\n return (\"Fail\", \"Fail\") # or just let it bubble up to the caller\n\n #groups_done[objname] = depth\n depth += 1\n children = [(group, depth) for group in obj.groups.exclude(name__in=egroups)]\n index_extend(children)\n\n return classes.as_dict(), params.as_dict() # or (classes.entries, params.entries)", "def 
asciitree(obj,depth=0,wide=2,last=[],recursed=False):\n\tcorner = u'\\u251C'\n\tcorner_end = u'\\u2514'\n\thorizo,horizo_bold = u'\\u2500',u'\\u2501'\n\tvertic,vertic_bold = u'\\u2502',u'\\u2503'\n\ttl,tr,bl,br = u'\\u250F',u'\\u2513',u'\\u2517',u'\\u251B'\n\tspacer_both = dict([(k,{\n\t\t0:'\\n',1:(' '*(wide+1)*(depth-1)+c+horizo*wide),\n\t\t2:' '*(wide+1)*(depth-1)}[depth] if depth <= 1 \n\t\telse (''.join([(vertic if d not in last else ' ')+\n\t\t' '*wide for d in range(1,depth)]))+c+horizo*wide) \n\t\tfor (k,c) in [('mid',corner),('end',corner_end)]])\n\tspacer = spacer_both['mid']\n\tif type(obj) in [float,int,bool]+str_types_list:\n\t\tif depth == 0: print(spacer+str(obj)+'\\n'+horizo*len(obj))\n\t\telse: print(spacer+str(obj))\n\telif isinstance(obj,dict) and all([type(i) in [str,float,int,bool] for i in obj.values()]) and depth==0:\n\t\tasciitree({'HASH':obj},depth=1,recursed=True)\n\telif type(obj) in [list,tuple]:\n\t\tfor ind,item in enumerate(obj):\n\t\t\tspacer_this = spacer_both['end'] if ind==len(obj)-1 else spacer\n\t\t\tif type(item) in [float,int,bool]+str_types_list: print(spacer_this+str(item))\n\t\t\telif item != {}:\n\t\t\t\tprint(spacer_this+'('+str(ind)+')')\n\t\t\t\tasciitree(item,depth=depth+1,\n\t\t\t\t\tlast=last+([depth] if ind==len(obj)-1 else []),\n\t\t\t\t\trecursed=True)\n\t\t\telse: print('unhandled tree object %s'%item)\n\telif isinstance(obj,dict) and obj != {}:\n\t\tfor ind,key in enumerate(obj.keys()):\n\t\t\tspacer_this = spacer_both['end'] if ind==len(obj)-1 else spacer\n\t\t\tif type(obj[key]) in [float,int,bool]+str_types_list: print(spacer_this+str(key)+' = '+str(obj[key]))\n\t\t\t# special: print single-item lists of strings on the same line as the key\n\t\t\telif type(obj[key])==list and len(obj[key])==1 and type(obj[key][0]) in [str,float,int,bool]:\n\t\t\t\tprint(spacer_this+key+' = '+str(obj[key]))\n\t\t\t# special: skip lists if blank dictionaries\n\t\t\telif type(obj[key])==list and all([i=={} for i in obj[key]]):\n\t\t\t\tprint(spacer_this+key+' = (empty)')\n\t\t\telif obj[key] != {}:\n\t\t\t\t# fancy border for top level\n\t\t\t\tif depth == 0:\n\t\t\t\t\tprint('\\n'+tl+horizo_bold*(len(key)+0)+\n\t\t\t\t\t\ttr+spacer_this+vertic_bold+str(key)+vertic_bold+'\\n'+\\\n\t\t\t\t\t\tbl+horizo_bold*len(key)+br+'\\n'+vertic)\n\t\t\t\telif obj[key]==None: print(spacer_this+key+' = None')\n\t\t\t\telse: print(spacer_this+key)\n\t\t\t\tif obj[key]!=None: \n\t\t\t\t\tasciitree(obj[key],depth=depth+1,\n\t\t\t\t\t\tlast=last+([depth] if ind==len(obj)-1 else []),\n\t\t\t\t\t\trecursed=True)\n\t\t\telif type(obj[key])==list and obj[key]==[]:\n\t\t\t\tprint(spacer_this+'(empty)')\n\t\t\telif obj[key]=={}: print(spacer_this+'%s = {}'%key)\n\t\t\telse: print('unhandled tree object %s'%key)\n\telse: print('unhandled tree object %s'%obj)\n\tif not recursed: print('\\n')", "async def test_async_browse_children() -> None:\n # pylint: disable=too-many-statements\n requester = UpnpTestRequester(RESPONSE_MAP)\n factory = UpnpFactory(requester)\n device = await factory.async_create_device(\"http://dlna_dms:1234/device.xml\")\n notify_server = UpnpTestNotifyServer(\n requester=requester,\n source=(\"192.168.1.2\", 8090),\n )\n event_handler = notify_server.event_handler\n profile = DmsDevice(device, event_handler=event_handler)\n\n # Object 0 is the root and must always exist\n requester.response_map[(\"POST\", \"http://dlna_dms:1234/upnp/control/ContentDir\")] = (\n 200,\n {},\n read_file(\"dlna/dms/action_Browse_children_0.xml\"),\n )\n result = await 
profile.async_browse_direct_children(\"0\")\n assert result.number_returned == 4\n assert result.total_matches == 4\n assert result.update_id == 2333\n children = result.result\n assert len(children) == 4\n assert children[0].title == \"Browse Folders\"\n assert children[0].id == \"64\"\n assert children[0].child_count == \"4\"\n assert children[1].title == \"Music\"\n assert children[1].id == \"1\"\n assert children[1].child_count == \"7\"\n assert children[2].title == \"Pictures\"\n assert children[2].id == \"3\"\n assert children[2].child_count == \"5\"\n assert children[3].title == \"Video\"\n assert children[3].id == \"2\"\n assert children[3].child_count == \"3\"\n\n # Object 2 will give some different results\n requester.response_map[(\"POST\", \"http://dlna_dms:1234/upnp/control/ContentDir\")] = (\n 200,\n {},\n read_file(\"dlna/dms/action_Browse_children_2.xml\"),\n )\n result = await profile.async_browse_direct_children(\"2\")\n assert result.number_returned == 3\n assert result.total_matches == 3\n assert result.update_id == 2333\n children = result.result\n assert len(children) == 3\n assert children[0].title == \"All Video\"\n assert children[0].id == \"2$8\"\n assert children[0].child_count == \"583\"\n assert children[1].title == \"Folders\"\n assert children[1].id == \"2$15\"\n assert children[1].child_count == \"2\"\n assert children[2].title == \"Recently Added\"\n assert children[2].id == \"2$FF0\"\n assert children[2].child_count == \"50\"\n\n # Object that is an item and not a container\n requester.response_map[(\"POST\", \"http://dlna_dms:1234/upnp/control/ContentDir\")] = (\n 200,\n {},\n read_file(\"dlna/dms/action_Browse_children_item.xml\"),\n )\n result = await profile.async_browse_direct_children(\"1$6$35$1$1\")\n assert result.number_returned == 0\n assert result.total_matches == 0\n assert result.update_id == 2333\n assert result.result == []\n\n # Bad object ID should result in a UpnpError (HTTP 701: No such object)\n requester.exceptions.append(UpnpResponseError(status=701))\n with pytest.raises(UpnpResponseError) as err:\n await profile.async_browse_direct_children(\"no object\")\n\n assert err.value.status == 701", "def test_help_on_objects(hlwm, path='', depth=8):\n help_txt = hlwm.call(['help', path]).stdout\n assert f\"Object '{path}'\" in help_txt\n\n if depth < 0:\n return\n\n for child in hlwm.list_children(path):\n newpath = (path + '.' 
+ child).lstrip('.')\n test_help_on_objects(hlwm, path=newpath, depth=depth - 1)", "def __json__(self, **kwargs):\n return self.nestify(instance=self, **kwargs).tree", "def show_tree(obj,d=0):\n print \"%s%s\" % (\"-\"*d,obj.__class__.__name__)\n if 'get_children' in dir(obj):\n for a in obj.get_children(): show_tree(a,d+1)", "def make_recursive(obj):\n if isinstance(obj, list):\n for i, l in enumerate(obj):\n obj[i] = AttrDict.make_recursive(l)\n elif isinstance(obj, dict):\n for k, v in obj.items():\n obj[k] = AttrDict.make_recursive(v)\n return AttrDict(obj)\n return obj", "def populateTree(self, obj, obj_name='', inspected_node_is_visible=None):\n logger.debug(\"populateTree with object id = 0x{:x}\".format(id(obj)))\n if inspected_node_is_visible is None:\n inspected_node_is_visible = (obj_name != '')\n self._inspected_node_is_visible = inspected_node_is_visible\n\n if self._inspected_node_is_visible:\n self._root_item = TreeItem(None, _('<invisible_root>'),\n _('<invisible_root>'), None)\n self._root_item.children_fetched = True\n self._inspected_item = TreeItem(obj, obj_name,\n obj_name, is_attribute=None)\n self._root_item.append_child(self._inspected_item)\n else:\n # The root itself will be invisible\n self._root_item = TreeItem(obj, obj_name,\n obj_name, is_attribute=None)\n self._inspected_item = self._root_item\n\n # Fetch all items of the root so we can\n # select the first row in the constructor.\n root_index = self.index(0, 0)\n self.fetchMore(root_index)", "def object_specs(self):\n if self._object_specs is None:\n self.object_specs = self.generate_object_specs()\n \n return self._object_specs", "def print_recursive(self, indents):\n\n\t\tind = \"\\t\"\n\t\toutput = indents * ind + self.name\n\t\tprint(output)\n\t\tfor i in self.children:\n\t\t\ti.print_recursive(indents+1)", "def _recurse_children(self, offset):\n while offset < self.obj_offset + self.Length:\n item = obj.Object(\"VerStruct\", offset = offset, vm = self.obj_vm, parent = self)\n if item.Length < 1 or item.get_key() == None:\n raise StopIteration(\"Could not recover a key for a child at offset {0}\".format(item.obj_offset))\n yield item.get_key(), item.get_children()\n offset = self.offset_pad(offset + item.Length)\n raise StopIteration(\"No children\")", "def traverse(object, path, default=None, request=None):", "def getAllLinks(jsonData, propDict, refDict, prefix='', context=''):\n linkList = OrderedDict()\n # check keys in propertyDictionary\n # if it is a Nav property, check that it exists\n # if it is not a Nav Collection, add it to list\n # otherwise, add everything IN Nav collection\n # if it is a Complex property, check that it exists\n # if it is, recurse on collection or individual item\n for key in propDict:\n item = getType(key).split(':')[-1]\n if propDict[key]['isNav']:\n insideItem = jsonData.get(item)\n if insideItem is not None:\n cType = propDict[key].get('isCollection') \n autoExpand = propDict[key].get('OData.AutoExpand',None) is not None or\\\n propDict[key].get('OData.AutoExpand'.lower(),None) is not None\n if cType is not None:\n cSchema = refDict.get(getNamespace(cType),(None,None))[1]\n if cSchema is None:\n cSchema = context \n for cnt, listItem in enumerate(insideItem):\n linkList[prefix+str(item)+'.'+getType(propDict[key]['isCollection']) +\n '#' + str(cnt)] = (listItem.get('@odata.id'), autoExpand, cType, cSchema, listItem)\n else:\n cType = propDict[key]['attrs'].get('type')\n cSchema = refDict.get(getNamespace(cType),(None,None))[1]\n if cSchema is None:\n cSchema = context \n 
linkList[prefix+str(item)+'.'+getType(propDict[key]['attrs']['name'])] = (\\\n insideItem.get('@odata.id'), autoExpand, cType, cSchema, insideItem)\n for key in propDict:\n item = getType(key).split(':')[-1]\n if propDict[key]['realtype'] == 'complex':\n if jsonData.get(item) is not None:\n if propDict[key].get('isCollection') is not None:\n for listItem in jsonData[item]:\n linkList.update(getAllLinks(\n listItem, propDict[key]['typeprops'], refDict, prefix+item+'.', context))\n else:\n linkList.update(getAllLinks(\n jsonData[item], propDict[key]['typeprops'], refDict, prefix+item+'.', context))\n rsvLogger.debug(str(linkList))\n return linkList", "def dump_iteration_tree(obj):\n def _dump_iteration_tree(obj, f, tablevel):\n if is_instance(obj, Driver):\n f.write(' ' * tablevel)\n f.write(obj.get_pathname())\n f.write('\\n')\n for comp in obj.workflow:\n if is_instance(comp, Driver) or is_instance(comp, Assembly):\n _dump_iteration_tree(comp, f, tablevel + 3)\n else:\n f.write(' ' * (tablevel + 3))\n f.write(comp.get_pathname())\n f.write('\\n')\n elif is_instance(obj, Assembly):\n f.write(' ' * tablevel)\n f.write(obj.get_pathname())\n f.write('\\n')\n _dump_iteration_tree(obj.driver, f, tablevel + 3)\n f = cStringIO.StringIO()\n _dump_iteration_tree(obj, f, 0)\n return f.getvalue()", "def build_traversal_spec(client_factory, name, spec_type, path, skip,\r\n select_set):\r\n traversal_spec = client_factory.create('ns0:TraversalSpec')\r\n traversal_spec.name = name\r\n traversal_spec.type = spec_type\r\n traversal_spec.path = path\r\n traversal_spec.skip = skip\r\n traversal_spec.selectSet = select_set\r\n return traversal_spec", "def __init__(self, obj, datamodel=None):\n with RecursiveConverter.in_progress:\n self.obj = obj\n self.class_name = obj.__class__.__name__\n self.datamodel = datamodel\n self.is_root = datamodel is None\n if self.is_root:\n RecursiveConverter.converted_modules = {}\n RecursiveConverter.typedefs = []\n self.datamodel = VHDLModule('-', obj)\n\n # recursively convert all child modules\n self.childs = []\n\n def conv(self, node):\n if isinstance(node, VHDLList):\n if node.elements_compatible_typed:\n if isinstance(node.elems[0], VHDLModule):\n if self.is_compatible_with_converted_module(node.elems[0]):\n return\n self.childs.append(RecursiveConverter(node.elems[0].current, node.elems[0]))\n\n else:\n # dynamic list..need to convert all modules\n for x in node.elems:\n if isinstance(x, VHDLModule):\n if self.is_compatible_with_converted_module(x):\n return\n self.childs.append(RecursiveConverter(x.current, x))\n elif isinstance(node, VHDLModule):\n if self.is_compatible_with_converted_module(node):\n return\n self.childs.append(RecursiveConverter(node.current, node))\n\n if self.is_root:\n logger.info(f'Creating top.vhd ...')\n self.top_vhdl = TopGenerator(obj)\n\n # maybe some input/output is a convertible module?\n for node in self.inputs:\n conv(self, node)\n\n for node in self.outputs:\n conv(self, node)\n\n # iterate all functions and discover local variables that may need to be converted\n for x in self.obj.__dict__.values():\n if isinstance(x, PyhaFunc):\n for key, val in x.get_local_types().items():\n if isinstance(val, Hardware):\n node = init_vhdl_type(key, val)\n conv(self, node)\n\n # convert instance elements before the instance itself, recursive\n for node in self.datamodel.elems:\n conv(self, node)\n\n self.red_node = get_objects_rednode(obj)\n convert_name = self.get_module_converted_name(self.datamodel)\n logger.info(f'{convert_name} to VHDL 
...')\n\n self.conv = convert(self.red_node, obj) # actual conversion happens here\n\n self.vhdl_conversion = str(self.conv)\n RecursiveConverter.converted_modules[convert_name] = (self.datamodel, self.vhdl_conversion)\n RecursiveConverter.typedefs.extend(self.conv.build_typedefs())", "def buildHierarchy(self, test_input):\n for entry in test_input:\n if entry['manager']not in self.relations:\n self.relations[entry['manager']] = Node(entry['manager'], entry['name'])\n else:\n self.relations[entry['manager']].employees.append(entry['name'])", "def findHierarchy(self):\n def __recursiveHelper(key_name, output, indent):\n if key_name in self.relations:\n for employee in self.relations[key_name].employees:\n output += \" \" * indent + str(employee) +\"\\n\"\n # return __recursiveHelper(employee, output, indent+1)\n __recursiveHelper(employee, output, indent+1)\n else:\n print(output)\n return output\n\n\n #experimenting with Iter() and next() iterators/generators\n #and a while loop in the recursive function:\n\n # def __recursiveHelper(key_name, output, indent):\n # if key_name in self.relations:\n # employees = iter(self.relations[key_name].employees)\n # employee = next(employees, \"stop\")\n # while employees and employee != 'stop':\n # output += \" \" * indent + str(employee) +\"\\n\"\n # __recursiveHelper(next(employees, \"stop\"), output, indent+1)\n # else:\n # employee = next(employees, \"stop\")\n #\n # else:\n # return output\n\n\n\n\n\n output = \"\"\n indent = -1\n # self.relations is a dictionary of manager-name string keys.\n # The employees of None are the top-ranking managers.\n # only issue:\n # having trouble returning the concatenated output\n # from the recursive function:\n return __recursiveHelper(None, output, indent+1)", "def nested_object_traversal(obj: any, leaf_function: Callable, leaf_type: type):\n if isinstance(obj, (list, tuple)):\n result = [Role.nested_object_traversal(elem, leaf_function, leaf_type) for elem in obj]\n return type(obj)(result)\n elif isinstance(obj, dict):\n return {\n k: Role.nested_object_traversal(v, leaf_function, leaf_type)\n for k, v in sorted(obj.items())\n }\n elif isinstance(obj, leaf_type):\n return leaf_function(obj)\n else:\n return obj" ]
[ "0.63169825", "0.577359", "0.5654486", "0.5466749", "0.5373837", "0.52702874", "0.5185981", "0.517904", "0.50982857", "0.50975597", "0.5090196", "0.5065294", "0.50485235", "0.50385857", "0.503404", "0.49969995", "0.4953721", "0.4944381", "0.49171883", "0.4908716", "0.49081615", "0.49032056", "0.49007183", "0.48865217", "0.48819378", "0.48814753", "0.48796782", "0.48725304", "0.48721215", "0.48710334" ]
0.6471344
0
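
(Editorial note, not dataset content:) the recursive traversal spec from the preceding record is normally combined with an object spec rooted at the inventory root folder, a property spec, and a property filter spec before being handed to the vSphere PropertyCollector. The sketch below is an assumption-laden illustration: `build_object_spec`, `build_property_spec`, and `build_property_filter_spec` follow the signatures shown elsewhere in this dump, while `vim` (an already-connected service proxy exposing `service_content`) is hypothetical.

    # Hedged sketch only: helper signatures follow the code shown in this
    # dump; `vim` is an assumed, already-connected vSphere service proxy.
    trav_spec = build_recursive_traversal_spec(client_factory)

    # Anchor the traversal at the root folder of the inventory.
    obj_spec = build_object_spec(client_factory,
                                 vim.service_content.rootFolder,
                                 [trav_spec])

    # Collect only the "name" property of VirtualMachine objects.
    prop_spec = build_property_spec(client_factory,
                                    type="VirtualMachine",
                                    properties_to_collect=["name"])

    # The filter spec ties object selection and property selection together;
    # it would then be passed to the PropertyCollector's RetrieveProperties
    # operation.
    filter_spec = build_property_filter_spec(client_factory,
                                             [prop_spec],
                                             [obj_spec])
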
Builds the Property Spec.
def build_property_spec(client_factory, type="VirtualMachine", properties_to_collect=["name"], all_properties=False): property_spec = client_factory.create('ns0:PropertySpec') property_spec.all = all_properties property_spec.pathSet = properties_to_collect property_spec.type = type return property_spec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_property_filter_spec(client_factory, property_specs, object_specs):\r\n property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\r\n property_filter_spec.propSet = property_specs\r\n property_filter_spec.objectSet = object_specs\r\n return property_filter_spec", "def build(self, spec, prefix):\n make()", "def get_prop_spec(client_factory, spec_type, properties):\r\n prop_spec = client_factory.create('ns0:PropertySpec')\r\n prop_spec.type = spec_type\r\n prop_spec.pathSet = properties\r\n return prop_spec", "def get_prop_spec(client_factory, spec_type, properties):\n prop_spec = client_factory.create('ns0:PropertySpec')\n prop_spec.type = spec_type\n prop_spec.pathSet = properties\n return prop_spec", "def _build_pod_spec(self):\n logger.debug(\"Building Pod Spec\")\n crds = []\n try:\n crds = [\n yaml.load(Path(f).read_text())\n for f in [\n \"files/configs.config.gatekeeper.sh.yaml\",\n \"files/constrainttemplates.templates.gatekeeper.sh.yaml\",\n \"files/constraintpodstatuses.status.gatekeeper.sh.yaml\",\n \"files/constrainttemplatepodstatuses.status.gatekeeper.sh.yaml\",\n ]\n ]\n except yaml.YAMLError as exc:\n logger.error(\"Error in configuration file:\", exc)\n\n crd_objects = [\n CustomResourceDefintion(crd[\"metadata\"][\"name\"], crd[\"spec\"])\n for crd in crds\n ]\n\n config = self.model.config\n spec_template = {}\n with open(\"files/pod-spec.yaml.jinja2\") as fh:\n spec_template = Template(fh.read())\n\n try:\n image_details = self.image.fetch()\n except OCIImageResourceError as e:\n self.model.unit.status = e.status\n return\n\n template_args = {\n \"crds\": crd_objects,\n \"image_details\": image_details,\n \"imagePullPolicy\": config[\"imagePullPolicy\"],\n \"app_name\": self.app.name,\n \"audit_cli_args\": self._audit_cli_args(),\n \"namespace\": os.environ[\"JUJU_MODEL_NAME\"],\n }\n\n spec = yaml.load(spec_template.render(**template_args))\n\n print(f\"Pod spec: {spec}\")\n return spec", "def generate_property_template(self):\n template = {\n \"@id\": \"url or curie of the property\",\n \"@type\": \"rdf:Property\",\n \"rdfs:comment\": \"description of the property\",\n \"rdfs:label\": \"carmel case, should match @id\",\n \"schema:domainIncludes\": {\n \"@id\": \"class which use it as a property, could be list\"\n },\n \"schema:isPartOf\": {\n \"@id\": \"http://schema.biothings.io\"\n },\n \"schema:rangeIncludes\": {\n \"@id\": \"relates a property to a class that constitutes (one of) the expected type(s) for values of the property\"\n }\n }\n return template", "def property_setup(self, properties):\n return properties", "def _create_property_field(property_, alias_dictionary):\n name_for_methods = property_['name_for_methods']\n\n assert property_['default_value'] is not None, \\\n ('MakeComputedStyleBase requires an default value for all fields, none specified '\n 'for property ' + property_['name'])\n\n if property_['field_template'] in alias_dictionary:\n alias_template = property_['field_template']\n for field in alias_dictionary[alias_template]:\n if field != 'name':\n property_[field] = alias_dictionary[alias_template][field]\n\n if property_['field_template'] == 'keyword':\n type_name = property_['type_name']\n default_value = type_name + '::' + enum_value_name(property_['default_value'])\n assert property_['field_size'] is None, \\\n (\"'\" + property_['name'] + \"' is a keyword field, \"\n \"so it should not specify a field_size\")\n size = int(math.ceil(math.log(len(property_['keywords']), 2)))\n elif property_['field_template'] == 
'multi_keyword':\n type_name = property_['type_name']\n default_value = type_name + '::' + enum_value_name(property_['default_value'])\n size = len(property_['keywords']) - 1 # Subtract 1 for 'none' keyword\n elif property_['field_template'] == 'external':\n type_name = property_['type_name']\n default_value = property_['default_value']\n size = None\n elif property_['field_template'] == 'primitive':\n type_name = property_['type_name']\n default_value = property_['default_value']\n size = 1 if type_name == 'bool' else property_[\"field_size\"] # pack bools with 1 bit.\n elif property_['field_template'] == 'pointer':\n type_name = property_['type_name']\n default_value = property_['default_value']\n size = None\n else:\n assert property_['field_template'] == 'monotonic_flag', \"Please put a valid value for field_template\"\n type_name = 'bool'\n default_value = 'false'\n size = 1\n\n if property_['wrapper_pointer_name']:\n assert property_['field_template'] in ['pointer', 'external']\n if property_['field_template'] == 'external':\n type_name = '{}<{}>'.format(property_['wrapper_pointer_name'], type_name)\n\n return Field(\n 'property',\n name_for_methods,\n property_name=property_['name'],\n inherited=property_['inherited'],\n independent=property_['independent'],\n type_name=type_name,\n wrapper_pointer_name=property_['wrapper_pointer_name'],\n field_template=property_['field_template'],\n size=size,\n default_value=default_value,\n custom_copy=property_['custom_copy'],\n custom_compare=property_['custom_compare'],\n mutable=property_['mutable'],\n getter_method_name=property_['getter'],\n setter_method_name=property_['setter'],\n initial_method_name=property_['initial'],\n computed_style_custom_functions=property_['computed_style_custom_functions'],\n )", "def render_specification_properties(spec, newline='\\n', ignore_props=None, prepend_items=None, append_items=None):\n\n spec_prop_list = []\n if prepend_items is not None:\n spec_prop_list += prepend_items\n ignore_keys = [] if ignore_props is None else ignore_props\n # Add link properties\n if isinstance(spec, LinkSpec):\n spec_prop_list.append('**Target Type** %s' %\n RSTDocument.get_reference(RSTSectionLabelHelper.get_section_label(\n spec['target_type']),\n spec['target_type']))\n # Add dataset properties\n if isinstance(spec, DatasetSpec):\n if spec.data_type_def is not None and spec.def_key() not in ignore_keys:\n spec_prop_list.append('**Neurodata Type:** %s' % str(spec.data_type_def))\n if spec.data_type_inc is not None and spec.inc_key() not in ignore_keys:\n extend_type = str(spec.data_type_inc)\n spec_prop_list.append('**Extends:** %s' %\n RSTDocument.get_reference(\n RSTSectionLabelHelper.get_section_label(extend_type),\n extend_type))\n if 'primitive_type' not in ignore_keys:\n spec_prop_list.append('**Primitive Type:** %s' % SpecToRST.spec_basetype_name(spec))\n if spec.get('quantity', None) is not None and 'quantity' not in ignore_keys:\n spec_prop_list.append('**Quantity:** %s' % SpecToRST.quantity_to_string(spec['quantity']))\n if spec.get('dtype', None) is not None and 'dtype' not in ignore_keys:\n spec_prop_list.append('**Data Type:** %s' % SpecToRST.render_data_type(spec['dtype']))\n if spec.get('dims', None) is not None and 'dims' not in ignore_keys:\n spec_prop_list.append('**Dimensions:** %s' % str(spec['dims']))\n if spec.get('shape', None) is not None and 'shape' not in ignore_keys:\n spec_prop_list.append('**Shape:** %s' % str(spec['shape']))\n if spec.get('linkable', None) is not None and 'linnkable' not in 
ignore_keys:\n spec_prop_list.append('**Linkable:** %s' % str(spec['linkable']))\n # Add group properties\n if isinstance(spec, GroupSpec):\n if spec.data_type_def is not None and spec.def_key() not in ignore_keys:\n ntype = str(spec.data_type_def)\n spec_prop_list.append('**Neurodata Type:** %s' %\n RSTDocument.get_reference(\n RSTSectionLabelHelper.get_section_label(ntype),\n ntype))\n if spec.data_type_inc is not None and spec.inc_key() not in ignore_keys:\n extend_type = str(spec.data_type_inc)\n spec_prop_list.append('**Extends:** %s' %\n RSTDocument.get_reference(\n RSTSectionLabelHelper.get_section_label(extend_type),\n extend_type))\n if 'primitive_type' not in ignore_keys:\n spec_prop_list.append('**Primitive Type:** %s' % SpecToRST.spec_basetype_name(spec))\n if spec.get('quantity', None) is not None and 'quantity' not in ignore_keys:\n spec_prop_list.append('**Quantity:** %s' % SpecToRST.quantity_to_string(spec['quantity']))\n if spec.get('linkable', None) is not None and 'linkable' not in ignore_keys:\n spec_prop_list.append('**Linkable:** %s' % str(spec['linkable']))\n # Add attribute spec properites\n if isinstance(spec, AttributeSpec):\n if 'primitive_type' not in ignore_keys:\n spec_prop_list.append('**Primitive Type:** %s' % SpecToRST.spec_basetype_name(spec))\n if spec.get('dtype', None) is not None and 'dtype' not in ignore_keys:\n spec_prop_list.append('**Data Type:** %s' % SpecToRST.render_data_type(spec['dtype']))\n if spec.get('dims', None) is not None and 'dims' not in ignore_keys:\n spec_prop_list.append('**Dimensions:** %s' % str(spec['dims']))\n if spec.get('shape', None) is not None and 'shape' not in ignore_keys:\n spec_prop_list.append('**Shape:** %s' % str(spec['shape']))\n if spec.get('required', None) is not None and 'required' not in ignore_keys:\n spec_prop_list.append('**Required:** %s' % str(spec['required']))\n if spec.get('value', None) is not None and 'value' not in ignore_keys:\n spec_prop_list.append('**Value:** %s' % str(spec['value']))\n if spec.get('default_value', None) is not None and 'default_value' not in ignore_keys:\n spec_prop_list.append('**Default Value:** %s' % str(spec['default_value']))\n\n # Add common properties\n if spec.get('default_name', None) is not None:\n spec_prop_list.append('**Default Name:** %s' % str(spec['default_name']))\n if spec.get('name', None) is not None:\n spec_prop_list.append('**Name:** %s' % str(spec['name']))\n\n # Add custom items if necessary\n if append_items is not None:\n spec_prop_list += append_items\n\n # Render the specification properties list\n spec_doc = ''\n if len(spec_prop_list) > 0:\n spec_doc += newline\n for dp in spec_prop_list:\n spec_doc += newline + '- ' + dp\n spec_doc += newline\n # Return the rendered list\n return spec_doc", "def generate_specs_build(self):\n from django_swagger_utils.drf_server.generators.swagger_generator import SwaggerGenerator\n\n swagger_gen = SwaggerGenerator(self.parser, self.paths, self.app_name)\n # generating request_response files\n swagger_gen.generate_request_response()\n # testing properties\n swagger_gen.generate_definitions()\n # generating global parameters\n swagger_gen.generate_parameters()\n # generating global response\n swagger_gen.generate_responses()\n # generating urls\n swagger_gen.generate_urls()", "def _build_properties(self, k, v, definition):\n\n if isinstance(v, schema.Map):\n newdef = self._create_section(definition, k, term=k)\n\n if v.schema is None:\n # if it's a map for arbritary values, only include description\n field = 
nodes.line('', v.description)\n newdef.append(field)\n return\n\n newdeflist = self._create_def_list(newdef)\n\n sorted_schema = sorted(v.schema.items(),\n key=cmp_to_key(self._sort_by_type))\n for key, value in sorted_schema:\n self._build_properties(key, value, newdeflist)\n elif isinstance(v, schema.List):\n newdef = self._create_section(definition, k, term=k)\n\n # identify next section as list properties\n field = nodes.line()\n emph = nodes.emphasis('', 'List properties:')\n field.append(emph)\n newdef.append(field)\n\n newdeflist = self._create_def_list(newdef)\n\n self._build_properties('**', v.schema['*'], newdeflist)\n else:\n newdef = self._create_section(definition, k, term=k)\n if 'description' in v:\n field = nodes.line('', v['description'])\n newdef.append(field)\n else:\n field = nodes.line('', '++')\n newdef.append(field)", "def _component_specs(self):\n specs = dict(pretransformed_input=self._input_spec)\n if self._transform_is_composite:\n specs['transform_fn'] = self.transform_or_spec\n if self._also_track_spec is not None:\n specs['also_track'] = self._also_track_spec\n return specs", "def create_property(self, key, prop):\n\n setting = self.new_property(key, prop)\n setting.create()\n return setting", "def build_property(value_token: ValueToken) -> property:\n def caller(_: Any) -> Any:\n return value_token.get_value()\n return property(caller)", "def __init__(self, spec):\n self.spec = spec", "def test_build_property(self):\n v1 = versions.Version(version='1.2.3.4', name='foo')\n expected = 4\n\n self.assertEqual(v1.build, expected)", "def _makeProperty( key, value ):\r\n property = PropertyValue()\r\n property.Name = key\r\n property.Value = value\r\n return property", "def _create_properties(self): # pylint: disable=no-self-use\n properties = {}\n properties[\"product\"] = \"eventhub.python\"\n properties[\"version\"] = __version__\n properties[\"framework\"] = \"Python {}.{}.{}\".format(*sys.version_info[0:3])\n properties[\"platform\"] = sys.platform\n return properties", "def _component_specs(self):\n specs = dict(pretransformed_input=self._input_spec)\n if self._transform_is_composite:\n specs['bijector'] = self.transform_or_spec\n return specs", "def _determine_properties(self, paramdict):\n for var in paramdict:\n if is_dimensionless(paramdict[var]):\n self._all_params_unit[var] = \"none\"\n yield lems.Property(var, \"none\")\n else:\n dim = _determine_dimension(paramdict[var])\n self._all_params_unit[var] = dim\n yield lems.Property(var, dim)", "def build_specfile_header(spec):\n str = \"\"\n\n # first the mandatory sections\n mandatory_header_fields = {\n 'NAME' : '%%define name %s\\nName: %%{name}\\n',\n 'VERSION' : '%%define version %s\\nVersion: %%{version}\\n',\n 'PACKAGEVERSION' : '%%define release %s\\nRelease: %%{release}\\n',\n 'X_RPM_GROUP' : 'Group: %s\\n',\n 'SUMMARY' : 'Summary: %s\\n',\n 'LICENSE' : 'License: %s\\n',\n }\n\n str = str + SimpleTagCompiler(mandatory_header_fields).compile( spec )\n\n # now the optional tags\n optional_header_fields = {\n 'VENDOR' : 'Vendor: %s\\n',\n 'X_RPM_URL' : 'Url: %s\\n',\n 'SOURCE_URL' : 'Source: %s\\n',\n 'SUMMARY_' : 'Summary(%s): %s\\n',\n 'ARCHITECTURE' : 'BuildArch: %s\\n',\n 'X_RPM_DISTRIBUTION' : 'Distribution: %s\\n',\n 'X_RPM_ICON' : 'Icon: %s\\n',\n 'X_RPM_PACKAGER' : 'Packager: %s\\n',\n 'X_RPM_GROUP_' : 'Group(%s): %s\\n',\n\n 'X_RPM_REQUIRES' : 'Requires: %s\\n',\n 'X_RPM_PROVIDES' : 'Provides: %s\\n',\n 'X_RPM_CONFLICTS' : 'Conflicts: %s\\n',\n 'X_RPM_BUILDREQUIRES' : 'BuildRequires: 
%s\\n',\n\n 'X_RPM_SERIAL' : 'Serial: %s\\n',\n 'X_RPM_EPOCH' : 'Epoch: %s\\n',\n 'X_RPM_AUTOREQPROV' : 'AutoReqProv: %s\\n',\n 'X_RPM_EXCLUDEARCH' : 'ExcludeArch: %s\\n',\n 'X_RPM_EXCLUSIVEARCH' : 'ExclusiveArch: %s\\n',\n 'X_RPM_PREFIX' : 'Prefix: %s\\n',\n\n # internal use\n 'X_RPM_BUILDROOT' : 'BuildRoot: %s\\n',\n }\n\n # fill in default values:\n # Adding a BuildRequires renders the .rpm unbuildable under systems which\n # are not managed by rpm, since the database to resolve this dependency is\n # missing (take Gentoo as an example)\n #if 'X_RPM_BUILDREQUIRES' not in spec:\n # spec['X_RPM_BUILDREQUIRES'] = 'scons'\n\n if 'X_RPM_BUILDROOT' not in spec:\n spec['X_RPM_BUILDROOT'] = '%{_tmppath}/%{name}-%{version}-%{release}'\n\n str = str + SimpleTagCompiler(optional_header_fields, mandatory=0).compile( spec )\n\n # Add any extra specfile definitions the user may have supplied.\n # These flags get no processing, they are just added.\n # github #3164: if we don't turn off debug package generation\n # the tests which build packages all fail. If there are no\n # extra flags, default to adding this one. If the user wants\n # to turn this back on, supply the flag set to None.\n\n if 'X_RPM_EXTRADEFS' not in spec:\n spec['X_RPM_EXTRADEFS'] = ['%global debug_package %{nil}']\n for extra in spec['X_RPM_EXTRADEFS']:\n str += extra + '\\n'\n\n return str", "def write_properties(self, prop_filename):\n # Collect list of all keys in self.plats that have True values,\n # but change \"windows\" to \"win64\" because build-sanity is annoying.\n sanity_plats = [\n (x if x != \"windows\" else \"win64\")\n for x in self.plats.keys() if self.plats[x]\n ]\n with open(prop_filename, \"w\") as prop:\n prop.write(\"CURRENT_BUILD_NUMBER={}\\n\".format(self.bld_num))\n prop.write(\"VERSION={}\\n\".format(self.version))\n prop.write(\"DISTROS={}\\n\".format(\" \".join(sanity_plats)))\n prop.write(\"TESTRUNNER_BRANCH={}\\n\".format(self.testrunner_branch))\n if self.use_magma:\n prop.write(\"EXTRA_TEST_PARAMS={}\\n\".format(\"bucket_storage=magma\"))", "def build_specfile_sections(spec):\n str = \"\"\n\n mandatory_sections = {\n 'DESCRIPTION' : '\\n%%description\\n%s\\n\\n', }\n\n str = str + SimpleTagCompiler(mandatory_sections).compile( spec )\n\n optional_sections = {\n 'DESCRIPTION_' : '%%description -l %s\\n%s\\n\\n',\n 'CHANGELOG' : '%%changelog\\n%s\\n\\n',\n 'X_RPM_PREINSTALL' : '%%pre\\n%s\\n\\n',\n 'X_RPM_POSTINSTALL' : '%%post\\n%s\\n\\n',\n 'X_RPM_PREUNINSTALL' : '%%preun\\n%s\\n\\n',\n 'X_RPM_POSTUNINSTALL' : '%%postun\\n%s\\n\\n',\n 'X_RPM_VERIFY' : '%%verify\\n%s\\n\\n',\n\n # These are for internal use but could possibly be overridden\n 'X_RPM_PREP' : '%%prep\\n%s\\n\\n',\n 'X_RPM_BUILD' : '%%build\\n%s\\n\\n',\n 'X_RPM_INSTALL' : '%%install\\n%s\\n\\n',\n 'X_RPM_CLEAN' : '%%clean\\n%s\\n\\n',\n }\n\n # Default prep, build, install and clean rules\n # TODO: optimize those build steps, to not compile the project a second time\n if 'X_RPM_PREP' not in spec:\n spec['X_RPM_PREP'] = '[ -n \"$RPM_BUILD_ROOT\" -a \"$RPM_BUILD_ROOT\" != / ] && rm -rf \"$RPM_BUILD_ROOT\"' + '\\n%setup -q'\n\n if 'X_RPM_BUILD' not in spec:\n spec['X_RPM_BUILD'] = '[ ! 
-e \"$RPM_BUILD_ROOT\" -a \"$RPM_BUILD_ROOT\" != / ] && mkdir \"$RPM_BUILD_ROOT\"'\n\n if 'X_RPM_INSTALL' not in spec:\n spec['X_RPM_INSTALL'] = 'scons --install-sandbox=\"$RPM_BUILD_ROOT\" \"$RPM_BUILD_ROOT\"'\n\n if 'X_RPM_CLEAN' not in spec:\n spec['X_RPM_CLEAN'] = '[ -n \"$RPM_BUILD_ROOT\" -a \"$RPM_BUILD_ROOT\" != / ] && rm -rf \"$RPM_BUILD_ROOT\"'\n\n str = str + SimpleTagCompiler(optional_sections, mandatory=0).compile( spec )\n\n return str", "def test_should_return_correct_gremlin_for_property(self):\r\n expected = 'updated_at = g.makeType().name(\"updated_at\").dataType(Integer.class).functional(true).makePropertyKey()'\r\n prop = self.spec_parser.parse_property(self.property_spec)\r\n assert prop.gremlin == expected\r\n\r\n expected = 'updated_at = g.makeType().name(\"updated_at\").dataType(Integer.class).functional(false).makePropertyKey()'\r\n self.property_spec['locking'] = False\r\n self.spec_parser._properties = {} # Reset saved properties\r\n self.spec_parser._names = []\r\n prop = self.spec_parser.parse_property(self.property_spec)\r\n assert prop.gremlin == expected\r\n\r\n expected = 'updated_at = g.makeType().name(\"updated_at\").dataType(Integer.class).functional(false).indexed().makePropertyKey()'\r\n self.property_spec['locking'] = False\r\n self.property_spec['indexed'] = True\r\n self.spec_parser._properties = {} # Reset saved properties\r\n self.spec_parser._names = []\r\n prop = self.spec_parser.parse_property(self.property_spec)\r\n assert prop.gremlin == expected\r\n\r\n expected = 'updated_at = g.makeType().name(\"updated_at\").dataType(Integer.class).makePropertyKey()'\r\n self.property_spec['functional'] = False\r\n self.property_spec['indexed'] = False\r\n self.spec_parser._properties = {} # Reset saved properties\r\n self.spec_parser._names = []\r\n prop = self.spec_parser.parse_property(self.property_spec)\r\n assert prop.gremlin == expected\r\n\r\n expected = 'updated_at = g.makeType().name(\"updated_at\").dataType(Integer.class).unique().makePropertyKey()'\r\n self.property_spec['functional'] = False\r\n self.property_spec['indexed'] = False\r\n self.property_spec['unique'] = True\r\n self.spec_parser._properties = {} # Reset saved properties\r\n self.spec_parser._names = []\r\n prop = self.spec_parser.parse_property(self.property_spec)\r\n assert prop.gremlin == expected, prop.gremlin", "def __init__(self, property_name='', *protocol_ids):\n\n self._full_path = ''\n\n if len(property_name) > 0 or len(protocol_ids) > 0:\n self._from_components(property_name, *protocol_ids)\n\n else:\n self._full_path = '{}'.format(ProtocolPath.property_separator)", "def __init__(self, value):\n if isinstance(value, bool):\n ptr = self.ffi.chfl_property_bool(c_bool(value))\n elif isinstance(value, (float, int)):\n ptr = self.ffi.chfl_property_double(c_double(value))\n elif isinstance(value, str):\n ptr = self.ffi.chfl_property_string(value.encode(\"utf8\"))\n elif _is_vector3d(value):\n value = chfl_vector3d(value[0], value[1], value[2])\n ptr = self.ffi.chfl_property_vector3d(value)\n else:\n raise ChemfilesError(\n f\"can not create a Property with a value of type '{type(value)}'\"\n )\n\n super(Property, self).__init__(ptr, is_const=False)", "def __init__(self, jsondict=None, strict=True):\n \n self.type = None\n \"\"\" Code that specifies the property DeviceDefinitionPropetyCode\n (Extensible).\n Type `CodeableConcept` (represented as `dict` in JSON). 
\"\"\"\n \n self.valueCode = None\n \"\"\" Property value as a code, e.g., NTP4 (synced to NTP).\n List of `CodeableConcept` items (represented as `dict` in JSON). \"\"\"\n \n self.valueQuantity = None\n \"\"\" Property value as a quantity.\n List of `Quantity` items (represented as `dict` in JSON). \"\"\"\n \n super(DeviceDefinitionProperty, self).__init__(jsondict=jsondict, strict=strict)", "def build_object_spec(client_factory, root_folder, traversal_specs):\r\n object_spec = client_factory.create('ns0:ObjectSpec')\r\n object_spec.obj = root_folder\r\n object_spec.skip = False\r\n object_spec.selectSet = traversal_specs\r\n return object_spec", "def get_specification(self) -> Dict:\n specification = {\n 'version': VERSION,\n 'metadata': {\n 'twoColumn': True,\n 'layout': self.autolayout\n },\n 'nodes': []\n }\n\n def strip_io(io_list: list, direction) -> list:\n \"\"\"\n Strips every input/output from metadata and leaves only\n `name` and `type` keys.\n \"\"\"\n return [\n {\n 'name': io['name'],\n 'type': io['type'],\n 'direction': direction\n }\n for io in io_list\n ]\n\n toremove = set()\n for key, node in self.nodes.items():\n try:\n node_cls = load_class(node.cls_name)\n except (ModuleNotFoundError, ImportError, Exception) as err:\n msg = f'Could not add {node_cls}. Reason:'\n _LOGGER.warn('-' * len(msg))\n _LOGGER.warn(msg)\n _LOGGER.warn(err)\n _LOGGER.warn('-' * len(msg))\n toremove.add(key)\n continue\n parameterschema = node_cls.form_parameterschema()\n\n properties = []\n for name, props in parameterschema['properties'].items():\n new_property = {'name': name}\n\n if 'default' in props:\n new_property['default'] = props['default']\n\n if 'description' in props:\n new_property['description'] = props['description']\n\n def add_default(default_val):\n if new_property.get('default') is None:\n new_property['default'] = default_val\n\n # Case for an input with range defined\n if 'enum' in props:\n new_property['type'] = 'select'\n new_property['values'] = list(map(str, props['enum']))\n add_default(new_property['values'][0])\n # Case for a single value input\n elif 'type' in props:\n if 'array' in props['type']:\n new_property['type'] = 'list'\n if 'items' in props and 'type' in props['items']:\n dtype = props['items']['type']\n new_property['dtype'] = dtype\n add_default([])\n elif 'boolean' in props['type']:\n new_property['type'] = 'checkbox'\n add_default(False)\n elif 'string' in props['type']:\n new_property['type'] = 'text'\n add_default('')\n elif 'integer' in props['type']:\n new_property['type'] = 'integer'\n add_default(0)\n elif 'number' in props['type']:\n new_property['type'] = 'number'\n add_default(0)\n elif 'object' in props['type']:\n # Object arguments should be defined in specification\n # as node inputs, rather than properties\n new_property = None\n else:\n new_property['type'] = 'text'\n add_default('')\n # If no type is specified then text is used\n else:\n new_property['type'] = 'text'\n add_default('')\n\n if new_property is not None:\n properties.append(new_property)\n\n specification['nodes'].append({\n 'name': node.name,\n 'type': node.type,\n 'category': node.category,\n 'properties': properties,\n 'interfaces': strip_io(\n self.io_mapping[node.type]['inputs'],\n 'input'\n ) + strip_io(\n self.io_mapping[node.type]['outputs'],\n 'output'\n )\n })\n\n for key in toremove:\n del self.nodes[key]\n return specification", "def make_pod_spec(self):\n spec = {\n 'containers': [{\n 'name': self.framework.model.app.name,\n 'imageDetails': {\n },\n 'ports': 
[{\n 'containerPort':\n self.framework.model.config['advertised-port'],\n 'protocol': 'TCP',\n }],\n }],\n }\n return spec" ]
[ "0.6053929", "0.595817", "0.5930383", "0.5877144", "0.5749173", "0.56912214", "0.5589146", "0.5584564", "0.5541774", "0.5472907", "0.54651445", "0.53387296", "0.5295169", "0.5217021", "0.5211193", "0.51826376", "0.51688266", "0.5148735", "0.5138071", "0.5137829", "0.51148397", "0.5113543", "0.51090896", "0.5107701", "0.50745106", "0.50550956", "0.50529927", "0.5051491", "0.50379395", "0.5028919" ]
0.69546396
0
Builds the Property Filter Spec.
def build_property_filter_spec(client_factory, property_specs, object_specs):
    property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
    property_filter_spec.propSet = property_specs
    property_filter_spec.objectSet = object_specs
    return property_filter_spec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_prop_filter_spec(client_factory, obj_spec, prop_spec):\r\n prop_filter_spec = \\\r\n client_factory.create('ns0:PropertyFilterSpec')\r\n prop_filter_spec.propSet = prop_spec\r\n prop_filter_spec.objectSet = obj_spec\r\n return prop_filter_spec", "def get_prop_filter_spec(client_factory, obj_spec, prop_spec):\n prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\n prop_filter_spec.propSet = prop_spec\n prop_filter_spec.objectSet = obj_spec\n return prop_filter_spec", "def _createSpecificProperty(self, filter_name):\n import uno\n from com.sun.star.beans import PropertyValue\n if filter_name == \"impress_html_Export\":\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('IsExportNotes', 0, True, 0),\n PropertyValue('PublishMode', 0, 0, 0),\n PropertyValue('Width', 0, 640, 0),\n PropertyValue('Format', 0, 2, 0),),), 0)\n elif filter_name == \"impress_pdf_Export\":\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('ExportNotesPages', 0, True, 0),\n PropertyValue('SelectPdfVersion', 0, 1, 0),),), 0)\n elif \"pdf_Export\" in filter_name :\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('SelectPdfVersion', 0, 1, 0),),), 0)\n elif filter_name in (\"draw_html_Export\", \"HTML (StarCalc)\"):\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('Format', 0, 2, 0),),), 0)\n elif filter_name == \"Text (encoded)\":\n property = PropertyValue('FilterFlags', 0, 'UTF8,LF', 0)\n else:\n return []\n\n return [property, ]", "def _get_filters(self, req):\n filters = {}\n properties = {}\n\n for param in req.str_params:\n if param in SUPPORTED_FILTERS:\n filters[param] = req.str_params.get(param)\n if param.startswith('property-'):\n _param = param[9:]\n properties[_param] = req.str_params.get(param)\n\n if len(properties) > 0:\n filters['properties'] = properties\n\n return filters", "def build_property_spec(client_factory, type=\"VirtualMachine\",\r\n properties_to_collect=[\"name\"],\r\n all_properties=False):\r\n property_spec = client_factory.create('ns0:PropertySpec')\r\n property_spec.all = all_properties\r\n property_spec.pathSet = properties_to_collect\r\n property_spec.type = type\r\n return property_spec", "def _get_filters(self, req):\n filters = {}\n for param in req.str_params:\n if param in SUPPORTED_FILTERS or param.startswith('property-'):\n # map filter name or carry through if property-*\n filter_name = SUPPORTED_FILTERS.get(param, param)\n filters[filter_name] = req.str_params.get(param)\n return filters", "def __init__(self, *args, **kwargs):\n\n # Construct the base instance.\n super(FilterPropList, self).__init__(*args, **kwargs)\n\n # Construct the regular expression tag evaluators.\n nameregextag = self.thistag.find('PropNameRegex')\n if nameregextag != None:\n self.nameregex = RegexTag(nameregextag)\n else:\n self.nameregex = None\n\n valueregextag = self.thistag.find('PropValueRegex')\n if valueregextag != None:\n self.valueregex = RegexTag(valueregextag)\n else:\n self.valueregex = None\n\n # Make sure that at least one regular expression is specified.\n if self.nameregex == None and self.valueregex == None:\n raise ValueError('Required tag missing: '\\\n 'PropNameRegex or PropValueRegex')\n\n # Get the \"look for the first match\" flag.\n self.matchfirst = self.get_boolean('matchFirst')\n logger.debug('matchfirst = 
{0}'.format(self.matchfirst))\n\n # Get the path name.\n self.path = self.context.tokens['Path']\n logger.debug('path = {0}'.format(self.path))", "def _build_filter_part(self, cls, filters, order_by=None, select=None):\r\n import types\r\n query_parts = []\r\n\r\n order_by_filtered = False\r\n\r\n if order_by:\r\n if order_by[0] == \"-\":\r\n order_by_method = \"DESC\";\r\n order_by = order_by[1:]\r\n else:\r\n order_by_method = \"ASC\";\r\n\r\n if select:\r\n if order_by and order_by in select:\r\n order_by_filtered = True\r\n query_parts.append(\"(%s)\" % select)\r\n\r\n if isinstance(filters, str) or isinstance(filters, unicode):\r\n query = \"WHERE %s AND `__type__` = '%s'\" % (filters, cls.__name__)\r\n if order_by in [\"__id__\", \"itemName()\"]:\r\n query += \" ORDER BY itemName() %s\" % order_by_method\r\n elif order_by != None:\r\n query += \" ORDER BY `%s` %s\" % (order_by, order_by_method)\r\n return query\r\n\r\n for filter in filters:\r\n filter_parts = []\r\n filter_props = filter[0]\r\n if type(filter_props) != list:\r\n filter_props = [filter_props]\r\n for filter_prop in filter_props:\r\n (name, op) = filter_prop.strip().split(\" \", 1)\r\n value = filter[1]\r\n property = cls.find_property(name)\r\n if name == order_by:\r\n order_by_filtered = True\r\n if types.TypeType(value) == types.ListType:\r\n filter_parts_sub = []\r\n for val in value:\r\n val = self.encode_value(property, val)\r\n if isinstance(val, list):\r\n for v in val:\r\n filter_parts_sub.append(self._build_filter(property, name, op, v))\r\n else:\r\n filter_parts_sub.append(self._build_filter(property, name, op, val))\r\n filter_parts.append(\"(%s)\" % (\" OR \".join(filter_parts_sub)))\r\n else:\r\n val = self.encode_value(property, value)\r\n if isinstance(val, list):\r\n for v in val:\r\n filter_parts.append(self._build_filter(property, name, op, v))\r\n else:\r\n filter_parts.append(self._build_filter(property, name, op, val))\r\n query_parts.append(\"(%s)\" % (\" or \".join(filter_parts)))\r\n\r\n\r\n type_query = \"(`__type__` = '%s'\" % cls.__name__\r\n for subclass in self._get_all_decendents(cls).keys():\r\n type_query += \" or `__type__` = '%s'\" % subclass\r\n type_query +=\")\"\r\n query_parts.append(type_query)\r\n\r\n order_by_query = \"\"\r\n\r\n if order_by:\r\n if not order_by_filtered:\r\n query_parts.append(\"`%s` LIKE '%%'\" % order_by)\r\n if order_by in [\"__id__\", \"itemName()\"]:\r\n order_by_query = \" ORDER BY itemName() %s\" % order_by_method\r\n else:\r\n order_by_query = \" ORDER BY `%s` %s\" % (order_by, order_by_method)\r\n\r\n if len(query_parts) > 0:\r\n return \"WHERE %s %s\" % (\" AND \".join(query_parts), order_by_query)\r\n else:\r\n return \"\"", "def _get_filters(self, req):\n filters = {}\n properties = {}\n\n for param in req.params:\n if param in SUPPORTED_FILTERS:\n filters[param] = req.params.get(param)\n if param.startswith('property-'):\n _param = param[9:]\n properties[_param] = req.params.get(param)\n\n if 'changes-since' in filters:\n isotime = filters['changes-since']\n try:\n filters['changes-since'] = timeutils.parse_isotime(isotime)\n except ValueError:\n raise exc.HTTPBadRequest(_(\"Unrecognized changes-since value\"))\n\n if 'protected' in filters:\n value = self._get_bool(filters['protected'])\n if value is None:\n raise exc.HTTPBadRequest(_(\"protected must be True, or \"\n \"False\"))\n\n filters['protected'] = value\n\n # only allow admins to filter on 'deleted'\n if req.context.is_admin:\n deleted_filter = self._parse_deleted_filter(req)\n if 
deleted_filter is not None:\n filters['deleted'] = deleted_filter\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n\n if properties:\n filters['properties'] = properties\n\n return filters", "def _build_filter(self, **kwargs):\n\n def object_filter(obj):\n for key, value in kwargs.items():\n # we replace dango-like lookup by dots, so attrgetter can do his job\n\n getter = utils.attrgetter(key)\n if hasattr(value, '__call__'):\n # User passed a callable for a custom comparison\n if not value(getter(obj)):\n return False\n else:\n if not getter(obj) == value:\n return False\n return True\n\n return object_filter", "def build_filters(self, filters=None):\n if filters is None:\n filters = {}\n\n grouped = get_grouped_filters(filters)\n branch_filters = get_branch_filter(filters)\n orm_filters = super(StoryResource, self).build_filters(filters)\n orm_filters['grouped'] = grouped\n orm_filters['br_filter'] = branch_filters\n\n if 'content_type__in' in filters:\n orm_filters['content_type__in'] = [CONTENT_HYDRATE[f] for f in filters['content_type__in'].split(',')]\n\n return orm_filters", "def __init__(self, *args, **kwargs):\n\n # Construct the base instance.\n super(FilterRevProp, self).__init__(*args, **kwargs)\n\n # Construct regular expression tag evaluators.\n nameregextag = self.thistag.find('PropNameRegex')\n if nameregextag != None:\n self.nameregex = RegexTag(nameregextag)\n else:\n self.nameregex = None\n\n valueregextag = self.thistag.find('PropValueRegex')\n if valueregextag != None:\n self.valueregex = RegexTag(valueregextag)\n else:\n self.valueregex = None\n\n # Make sure that at least one regular expression is specified.\n if self.nameregex == None and self.valueregex == None:\n raise ValueError('Required tag missing: '\\\n 'PropNameRegex or PropValueRegex')\n\n # Save the revision property details.\n self.propname = self.context.tokens['RevPropName']\n logger.debug('propname = {0}'.format(self.propname))\n self.propvalue = self.context.tokens['RevPropValue']\n logger.debug('propvalue = \"{0}\"'.format(self.propvalue))", "def _build(self, prefilt=None):\n self.make_filiation()\n if prefilt is not None:\n self.prefilter(filt=prefilt)\n self.make_trees()\n return", "def _generate_stats(self, host_state, filter_properties):\n\n filter_function = None\n\n if ('filter_function' in host_state.capabilities and\n host_state.capabilities['filter_function'] is not None):\n filter_function = str(\n host_state.capabilities['filter_function'])\n\n stats = utils.generate_stats(host_state, filter_properties)\n\n stats['filter_function'] = filter_function\n\n return stats", "def _propertyFilter(self, entity, params):\n\n if 'property_conditions' not in params:\n raise ProtocolError()\n\n conditions = params['property_conditions']\n\n for field, allowed_values in conditions.iteritems():\n if entity.__getattribute__(field) not in allowed_values:\n return False\n\n return True", "def __init__(self, filter_methods: ConfigNodePropertyArray=None, filter_enable_safe_user_agents: ConfigNodePropertyBoolean=None, filter_safe_user_agents: ConfigNodePropertyArray=None, filter_excluded_paths: ConfigNodePropertyArray=None): # noqa: E501\n self.openapi_types = {\n 'filter_methods': ConfigNodePropertyArray,\n 'filter_enable_safe_user_agents': ConfigNodePropertyBoolean,\n 'filter_safe_user_agents': ConfigNodePropertyArray,\n 'filter_excluded_paths': ConfigNodePropertyArray\n }\n\n self.attribute_map = {\n 'filter_methods': 
'filter.methods',\n 'filter_enable_safe_user_agents': 'filter.enable.safe.user.agents',\n 'filter_safe_user_agents': 'filter.safe.user.agents',\n 'filter_excluded_paths': 'filter.excluded.paths'\n }\n\n self._filter_methods = filter_methods\n self._filter_enable_safe_user_agents = filter_enable_safe_user_agents\n self._filter_safe_user_agents = filter_safe_user_agents\n self._filter_excluded_paths = filter_excluded_paths", "def _write_filter_params(self, spec):\n spec.switch_write_focus(self.REGIONS.FILTER_PARAMS.value)\n for param in self._filter_params:\n spec.write_value(param, data_type=DataType.FLOAT_64)", "def build_query_filters(**kwargs):\n queryTarget = kwargs.get(\"queryTarget\", None)\n targetSubtreeClass = kwargs.get(\"targetSubtreeClass\", None)\n queryTargetFilter = kwargs.get(\"queryTargetFilter\", None)\n rspSubtree = kwargs.get(\"rspSubtree\", None)\n rspSubtreeInclude = kwargs.get(\"rspSubtreeInclude\", None)\n rspPropInclude = kwargs.get(\"rspPropInclude\", None)\n opts = \"\"\n if queryTarget is not None:\n opts+= \"&query-target=%s\" % queryTarget\n if targetSubtreeClass is not None:\n opts+= \"&target-subtree-class=%s\" % targetSubtreeClass\n if queryTargetFilter is not None:\n opts+= \"&query-target-filter=%s\" % queryTargetFilter\n if rspSubtree is not None:\n opts+= \"&rsp-subtree=%s\" % rspSubtree\n if rspSubtreeInclude is not None:\n opts+= \"&rsp-subtree-include=%s\" % rspSubtreeInclude\n if rspPropInclude is not None:\n opts+= \"&rsp-prop-include=%s\" % rspPropInclude\n\n if len(opts)>0: opts = \"?%s\" % opts.strip(\"&\")\n return opts", "def get_filters(self):", "def test_fields_from_property():\n prop_template = PropertyTemplate(name=\"cookie eating template\", bounds=IntegerBounds(0, 1000))\n cond_template = ConditionTemplate(name=\"Hunger template\",\n bounds=CategoricalBounds([\"hungry\", \"full\", \"peckish\"]))\n prop = Property(name=\"number of cookies eaten\",\n template=prop_template,\n origin='measured',\n value=NominalInteger(27))\n cond = Condition(name=\"hunger level\",\n template=cond_template,\n origin='specified',\n value=NominalCategorical(\"hungry\"))\n\n prop_and_conds = PropertyAndConditions(property=prop, conditions=[cond])\n assert prop_and_conds.name == prop.name\n assert prop_and_conds.template == prop.template\n assert prop_and_conds.origin == prop.origin\n assert prop_and_conds.value == prop.value", "def __init__(self) -> None:\r\n self.filters: list[Filter] = []", "def properties(self, filters={}):\n return self.__get_list_client(Property)(filters=filters)", "def build_query_filters(**kwargs):\n queryTarget = kwargs.get(\"queryTarget\", None)\n targetSubtreeClass = kwargs.get(\"targetSubtreeClass\", None)\n queryTargetFilter = kwargs.get(\"queryTargetFilter\", None)\n rspSubtree = kwargs.get(\"rspSubtree\", None)\n rspSubtreeClass = kwargs.get(\"rspSubtreeClass\", None)\n rspSubtreeInclude = kwargs.get(\"rspSubtreeInclude\", None)\n rspPropInclude = kwargs.get(\"rspPropInclude\", None)\n orderBy = kwargs.get(\"orderBy\", None)\n opts = \"\"\n if queryTarget is not None:\n opts+= \"&query-target=%s\" % queryTarget\n if targetSubtreeClass is not None:\n opts+= \"&target-subtree-class=%s\" % targetSubtreeClass\n if queryTargetFilter is not None:\n opts+= \"&query-target-filter=%s\" % queryTargetFilter\n if rspSubtree is not None:\n opts+= \"&rsp-subtree=%s\" % rspSubtree\n if rspSubtreeClass is not None:\n opts+= \"&rsp-subtree-class=%s\" % rspSubtreeClass\n if rspSubtreeInclude is not None:\n opts+= \"&rsp-subtree-include=%s\" % 
rspSubtreeInclude\n if rspPropInclude is not None:\n opts+= \"&rsp-prop-include=%s\" % rspPropInclude\n if orderBy is not None:\n opts+= \"&order-by=%s\" % orderBy\n\n if len(opts)>0: opts = \"?%s\" % opts.strip(\"&\")\n return opts", "def _component_specs(self):\n specs = dict(pretransformed_input=self._input_spec)\n if self._transform_is_composite:\n specs['transform_fn'] = self.transform_or_spec\n if self._also_track_spec is not None:\n specs['also_track'] = self._also_track_spec\n return specs", "def build_feature_filter(self):\n if self.features == [\"*\"]:\n random_iso = list(self.data.keys())[0]\n self.features = set()\n for lang_features in self.data.values():\n self.features |= set(lang_features.keys())\n self.features = list(self.features)\n if self.exclusions:\n self.features = [f for f in self.features if f not in self.exclusions]\n self.feature_filter = set(self.features)", "def __getSelectionFilter(self):\n \n selectionPairs = []\n selectionPairs.append(('field','field'))\n selectionPairs.append(('spw','spw'))\n selectionPairs.append(('polarization','correlation'))\n selectionPairs.append(('baseline','antenna'))\n selectionPairs.append(('time','timerange'))\n selectionPairs.append(('scan','scan'))\n selectionPairs.append(('uvdist','uvrange'))\n selectionPairs.append(('scanintent','intent'))\n selectionPairs.append(('observation','observation'))\n return self.__generateFilter(selectionPairs)", "def __init__(self, filter_spec = [ [{},False] ]):\n\n Qt.QObject.__init__(self)\n\n\n # key = property name of Element object\n # value = displayed column name for tables showing choices and matches\n self.elem_property_vs_col_name = \\\n {'name':'Name', 'devname':'Dev. Name', 'cell':'Cell',\n 'family':'Family', 'girder':'Girder', 'group':'Group',\n 'index':'Lat. Index', 'length':'Eff.Len', 'phylen':'Phys. 
Len.',\n 'pv':'PV', 'sb':'sb', 'se':'se', 'symmetry':'Symmetry',\n 'virtual':'Virtual', 'sequence':'Sequence'}\n\n # key = property name of Element object & exclusion flag\n # value = displayed column name for table showing filters\n self.filter_property_vs_col_name = \\\n self.elem_property_vs_col_name.copy()\n self.filter_property_vs_col_name.update({'exclude':'Excl.'}) # adding extra column\n\n # Specify the default column order you want for tables showing\n # choices and matches.\n self.elem_property_list = ['family', 'name', 'devname', 'cell',\n 'girder', 'symmetry', 'group', 'virtual',\n 'sb', 'se', 'pv', 'length', 'phylen',\n 'index', 'sequence']\n self.col_name_list = [self.elem_property_vs_col_name[prop]\n for prop in self.elem_property_list]\n self.choice_dict = dict.fromkeys(self.elem_property_list)\n\n # Specify the default column order you want for table showing\n # filters.\n self.filter_property_list = self.elem_property_list[:]\n self.filter_property_list.insert(0, 'exclude')\n self.filter_col_name_list = [self.filter_property_vs_col_name[prop]\n for prop in self.filter_property_list]\n self.filter_dict = dict.fromkeys(self.filter_property_list)\n\n self.numeric_filter_list = ['index', 'phylen', 'length', 'sb', 'se']\n self.not_implemented_filter_list = ['sequence']\n\n self.filter_spec = filter_spec\n\n self.allElements = ap.getElements('*')\n\n # Initialization of matching data information\n self.matched = [ [True]*len(self.allElements) ]\n self.combine_matched_list()\n self.update_choice_dict()\n\n # Apply initial filters provided by a user, if any.\n if self.filter_spec:\n isCaseSensitive = False\n self.filterData(range(len(self.filter_spec)), isCaseSensitive)\n\n self.selectedElements = []", "def build_filters(self, filters = None):\n if filters is None:\n filters = {}\n \n orm_filters = super(AreaResource, self).build_filters(filters)\n \n if \"level\" in filters:\n orm_filters[\"layout__level\"] = int(filters[\"level\"])\n \n return orm_filters", "def build_filters(self, view, filters=None):\n query_builder = self.get_query_builder(backend=self, view=view)\n return query_builder.build_query(**(filters if filters else {}))", "def create_filter_from_args(self, args: dict) -> Filter:\n keys = set(args.keys())\n filter_args = {}\n\n if \"name\" in keys:\n value = args.get('name')\n if value != \"\":\n filter_args.update({\"text_filter\": args.get('name')})\n if \"product_in\" in keys:\n value = args.get('product_in')\n if value != \"\":\n filter_args.update({\"product_in\": 'true' if value == \"yes\" else 'false'})\n if \"human_in\" in keys:\n value = args.get('human_in')\n if value != \"\":\n filter_args.update({\"human_in\": 'true' if value == \"yes\" else 'false'})\n if \"institutional\" in keys:\n value = args.get('institutional')\n if value != \"\":\n filter_args.update({\"institutional\": 'true' if value == \"yes\" else 'false'})\n if \"format\" in keys:\n value = args.get('format')\n if value != \"\":\n filter_args.update({\"picture_format\": 'true' if value == \"vertical\" else 'false'})\n if \"credit\" in keys:\n value = args.get('credit')\n if value != \"\":\n filter_args.update({\"author_credits\": value})\n if \"limited_use\" in keys:\n value = args.get('limited_use')\n if value != \"\":\n filter_args.update({\"limited_usage\": 'true' if value == \"yes\" else 'false'})\n if \"tags\" in keys:\n value = args.get('tags')\n if value != \"\":\n filter_args.update({\"limited_usage\": value.split(';')})\n\n f = Filter(**filter_args)\n return f" ]
[ "0.6691381", "0.6613962", "0.6211153", "0.60873485", "0.59203535", "0.5826266", "0.5766887", "0.546501", "0.54321504", "0.5387858", "0.5352514", "0.5318498", "0.53072774", "0.52971756", "0.52805036", "0.5272654", "0.5242891", "0.5188393", "0.51839644", "0.5144999", "0.51025003", "0.5051847", "0.5035122", "0.50207436", "0.5019541", "0.50063837", "0.49401972", "0.492547", "0.48733547", "0.4867145" ]
0.76600534
0
Gets the properties of the Managed object specified.
def get_object_properties(vim, collector, mobj, type, properties):
    client_factory = vim.client.factory
    if mobj is None:
        return None
    usecoll = collector
    if usecoll is None:
        usecoll = vim.get_service_content().propertyCollector
    property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
    property_spec = client_factory.create('ns0:PropertySpec')
    property_spec.all = (properties is None or len(properties) == 0)
    property_spec.pathSet = properties
    property_spec.type = type
    object_spec = client_factory.create('ns0:ObjectSpec')
    object_spec.obj = mobj
    object_spec.skip = False
    property_filter_spec.propSet = [property_spec]
    property_filter_spec.objectSet = [object_spec]
    return vim.RetrieveProperties(usecoll, specSet=[property_filter_spec])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_object_properties(vim, collector, mobj, type, properties):\n client_factory = vim.client.factory\n if mobj is None:\n return None\n usecoll = collector\n if usecoll is None:\n usecoll = vim.service_content.propertyCollector\n property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\n property_spec = client_factory.create('ns0:PropertySpec')\n property_spec.all = (properties is None or len(properties) == 0)\n property_spec.pathSet = properties\n property_spec.type = type\n object_spec = client_factory.create('ns0:ObjectSpec')\n object_spec.obj = mobj\n object_spec.skip = False\n property_filter_spec.propSet = [property_spec]\n property_filter_spec.objectSet = [object_spec]\n return retrieve_properties_ex(vim,\n usecoll,\n [property_filter_spec])", "def get_properties(self):\n return self.properties", "def properties_get(self):\n return self._get('properties')", "def get_properties(self):\n return self.properties", "def _get_managed_objects_properties(self, vim_type, properties=None):\n # Get Root Folder\n root_folder = self.content.rootFolder\n\n if properties is None:\n properties = ['name']\n\n # Create Container View with default root folder\n mor = self.content.viewManager.CreateContainerView(\n root_folder, [vim_type], True)\n\n # Create Traversal spec\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(\n name=\"traversal_spec\",\n path='view',\n skip=False,\n type=vim.view.ContainerView\n )\n\n # Create Property Spec\n property_spec = vmodl.query.PropertyCollector.PropertySpec(\n type=vim_type, # Type of object to retrieved\n all=False,\n pathSet=properties\n )\n\n # Create Object Spec\n object_spec = vmodl.query.PropertyCollector.ObjectSpec(\n obj=mor,\n skip=True,\n selectSet=[traversal_spec]\n )\n\n # Create Filter Spec\n filter_spec = vmodl.query.PropertyCollector.FilterSpec(\n objectSet=[object_spec],\n propSet=[property_spec],\n reportMissingObjectsInResults=False\n )\n\n return self.content.propertyCollector.RetrieveContents([filter_spec])", "def getProperties(self):\n return self.properties", "def getProperties(self, owner: unicode) -> List[ghidra.program.model.util.PropertyMap]:\n ...", "def get_properties_for_a_collection_of_objects(vim, type,\r\n obj_list, properties):\r\n client_factory = vim.client.factory\r\n if len(obj_list) == 0:\r\n return []\r\n prop_spec = get_prop_spec(client_factory, type, properties)\r\n lst_obj_specs = []\r\n for obj in obj_list:\r\n lst_obj_specs.append(get_obj_spec(client_factory, obj))\r\n prop_filter_spec = get_prop_filter_spec(client_factory,\r\n lst_obj_specs, [prop_spec])\r\n return vim.RetrieveProperties(vim.get_service_content().propertyCollector,\r\n specSet=[prop_filter_spec])", "def properties(self):\r\n if self._properties is None:\r\n res = self._con.get(self._url, {'f':'json'})\r\n self._properties = PropertyMap(res)\r\n return self._properties", "def properties(self):\r\n if self._properties is None:\r\n res = self._con.get(self._url, {'f':'json'})\r\n self._properties = PropertyMap(res)\r\n return self._properties", "def object_attributes(obj):\n return obj.__dict__.items()", "def get_properties():", "def properties(self):\n return self._props", "def properties(self, filters={}):\n return self.__get_list_client(Property)(filters=filters)", "def getProperties():", "def properties(self):\n return self._properties", "def properties(self):\n return self._properties", "def getObjectProperty(self, owner: unicode, propertyName: unicode, saveableObjectClass: java.lang.Class, create: bool) -> 
ghidra.program.model.util.ObjectPropertyMap:\n ...", "def get_properties_for_a_collection_of_objects(vim, type,\n obj_list, properties):\n client_factory = vim.client.factory\n if len(obj_list) == 0:\n return []\n prop_spec = get_prop_spec(client_factory, type, properties)\n lst_obj_specs = []\n for obj in obj_list:\n lst_obj_specs.append(get_obj_spec(client_factory, obj))\n prop_filter_spec = get_prop_filter_spec(client_factory,\n lst_obj_specs, [prop_spec])\n return retrieve_properties_ex(vim,\n vim.service_content.propertyCollector,\n [prop_filter_spec])", "def getPropertiesAll():", "def getObjectPropertyMap(self, propertyName: unicode) -> ghidra.program.model.util.ObjectPropertyMap:\n ...", "def properties(self):\n\n return self._properties", "def properties(self, pk):\n return JsonResponse(self._get_properties(pk))", "def test_get_objects_with_properties(self):\n expected_result = self.spec.get(\"test_get_objects_with_properties\")\n expected_type = expected_result.get(\"_type\")\n expected_datastore_list = []\n\n for each_datastore in expected_result.get(\"datastore_infos\"):\n datastore_name = each_datastore[\"name\"]\n expected_datastore_list.append(datastore_name)\n datastore_list = []\n \n object_content = self.session.invoke_api(vim_util, \n 'get_objects', \n self.vim, \n 'Datastore', \n 100, \n ['name'])\n for one_object in object_content.objects:\n self.assertEqual(one_object.obj._type, expected_type)\n if hasattr(one_object, 'propSet'):\n dynamic_properties = one_object.propSet\n prop_dict = {}\n for prop in dynamic_properties:\n if prop.name == \"name\":\n datastore_list.append(prop.val)\n \n for each_ds_name in datastore_list:\n self.assertTrue(each_ds_name in datastore_list)", "def properties(self) -> Any:\n return pulumi.get(self, \"properties\")", "def get_object_dimension_props(obj):\n props = eval(\"obj.\" + DIMENSION_PROPERTY_NAMESPACE)\n return props", "def properties(self):\n return PropertyManager(session=self._session)", "def properties(self):\n return self.properties_with_uid[1:]", "def get_properties(\n self,\n ins: common.GetPropertiesIns,\n timeout: Optional[float],\n ) -> common.GetPropertiesRes:\n get_properties_msg = serde.get_properties_ins_to_proto(ins)\n res_wrapper: ResWrapper = self.bridge.request(\n ins_wrapper=InsWrapper(\n server_message=ServerMessage(get_properties_ins=get_properties_msg),\n timeout=timeout,\n )\n )\n client_msg: ClientMessage = res_wrapper.client_message\n get_properties_res = serde.get_properties_res_from_proto(\n client_msg.get_properties_res\n )\n return get_properties_res", "def getProperties(groupId, contractId):\n\tprint \"Getting properties for group %s and contract %s\" % (groupId, contractId)\n\tproperty_parameters = { \"contractId\":contractId, \"groupId\":groupId }\n\tproperty_result = getResult('/papi/v0/properties', property_parameters)\n\t\n\tif \"properties\" in property_result:\n\t\tproperty_items = property_result['properties']['items']\n\telse:\n\t\tproperty_items = []\n\n\treturn (property_items)" ]
[ "0.66242605", "0.6595597", "0.65718126", "0.65414923", "0.64137036", "0.63451725", "0.6315363", "0.6177473", "0.61558604", "0.61558604", "0.61421597", "0.61126566", "0.6106942", "0.6099833", "0.60941976", "0.6071322", "0.6071322", "0.6018031", "0.599956", "0.5993409", "0.59584665", "0.5901056", "0.5859067", "0.585625", "0.5852642", "0.58399284", "0.58364946", "0.5833915", "0.58327276", "0.5824847" ]
0.679616
0
Builds the Property Filter Spec Object.
def get_prop_filter_spec(client_factory, obj_spec, prop_spec):
    prop_filter_spec = \
        client_factory.create('ns0:PropertyFilterSpec')
    prop_filter_spec.propSet = prop_spec
    prop_filter_spec.objectSet = obj_spec
    return prop_filter_spec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_property_filter_spec(client_factory, property_specs, object_specs):\r\n property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\r\n property_filter_spec.propSet = property_specs\r\n property_filter_spec.objectSet = object_specs\r\n return property_filter_spec", "def get_prop_filter_spec(client_factory, obj_spec, prop_spec):\n prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\n prop_filter_spec.propSet = prop_spec\n prop_filter_spec.objectSet = obj_spec\n return prop_filter_spec", "def _createSpecificProperty(self, filter_name):\n import uno\n from com.sun.star.beans import PropertyValue\n if filter_name == \"impress_html_Export\":\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('IsExportNotes', 0, True, 0),\n PropertyValue('PublishMode', 0, 0, 0),\n PropertyValue('Width', 0, 640, 0),\n PropertyValue('Format', 0, 2, 0),),), 0)\n elif filter_name == \"impress_pdf_Export\":\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('ExportNotesPages', 0, True, 0),\n PropertyValue('SelectPdfVersion', 0, 1, 0),),), 0)\n elif \"pdf_Export\" in filter_name :\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('SelectPdfVersion', 0, 1, 0),),), 0)\n elif filter_name in (\"draw_html_Export\", \"HTML (StarCalc)\"):\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('Format', 0, 2, 0),),), 0)\n elif filter_name == \"Text (encoded)\":\n property = PropertyValue('FilterFlags', 0, 'UTF8,LF', 0)\n else:\n return []\n\n return [property, ]", "def _get_filters(self, req):\n filters = {}\n properties = {}\n\n for param in req.str_params:\n if param in SUPPORTED_FILTERS:\n filters[param] = req.str_params.get(param)\n if param.startswith('property-'):\n _param = param[9:]\n properties[_param] = req.str_params.get(param)\n\n if len(properties) > 0:\n filters['properties'] = properties\n\n return filters", "def __init__(self, *args, **kwargs):\n\n # Construct the base instance.\n super(FilterPropList, self).__init__(*args, **kwargs)\n\n # Construct the regular expression tag evaluators.\n nameregextag = self.thistag.find('PropNameRegex')\n if nameregextag != None:\n self.nameregex = RegexTag(nameregextag)\n else:\n self.nameregex = None\n\n valueregextag = self.thistag.find('PropValueRegex')\n if valueregextag != None:\n self.valueregex = RegexTag(valueregextag)\n else:\n self.valueregex = None\n\n # Make sure that at least one regular expression is specified.\n if self.nameregex == None and self.valueregex == None:\n raise ValueError('Required tag missing: '\\\n 'PropNameRegex or PropValueRegex')\n\n # Get the \"look for the first match\" flag.\n self.matchfirst = self.get_boolean('matchFirst')\n logger.debug('matchfirst = {0}'.format(self.matchfirst))\n\n # Get the path name.\n self.path = self.context.tokens['Path']\n logger.debug('path = {0}'.format(self.path))", "def build_property_spec(client_factory, type=\"VirtualMachine\",\r\n properties_to_collect=[\"name\"],\r\n all_properties=False):\r\n property_spec = client_factory.create('ns0:PropertySpec')\r\n property_spec.all = all_properties\r\n property_spec.pathSet = properties_to_collect\r\n property_spec.type = type\r\n return property_spec", "def _get_filters(self, req):\n filters = {}\n for param in req.str_params:\n if param in SUPPORTED_FILTERS or param.startswith('property-'):\n # map 
filter name or carry through if property-*\n filter_name = SUPPORTED_FILTERS.get(param, param)\n filters[filter_name] = req.str_params.get(param)\n return filters", "def __init__(self, *args, **kwargs):\n\n # Construct the base instance.\n super(FilterRevProp, self).__init__(*args, **kwargs)\n\n # Construct regular expression tag evaluators.\n nameregextag = self.thistag.find('PropNameRegex')\n if nameregextag != None:\n self.nameregex = RegexTag(nameregextag)\n else:\n self.nameregex = None\n\n valueregextag = self.thistag.find('PropValueRegex')\n if valueregextag != None:\n self.valueregex = RegexTag(valueregextag)\n else:\n self.valueregex = None\n\n # Make sure that at least one regular expression is specified.\n if self.nameregex == None and self.valueregex == None:\n raise ValueError('Required tag missing: '\\\n 'PropNameRegex or PropValueRegex')\n\n # Save the revision property details.\n self.propname = self.context.tokens['RevPropName']\n logger.debug('propname = {0}'.format(self.propname))\n self.propvalue = self.context.tokens['RevPropValue']\n logger.debug('propvalue = \"{0}\"'.format(self.propvalue))", "def _build(self, prefilt=None):\n self.make_filiation()\n if prefilt is not None:\n self.prefilter(filt=prefilt)\n self.make_trees()\n return", "def __init__(self) -> None:\r\n self.filters: list[Filter] = []", "def _build_filter_part(self, cls, filters, order_by=None, select=None):\r\n import types\r\n query_parts = []\r\n\r\n order_by_filtered = False\r\n\r\n if order_by:\r\n if order_by[0] == \"-\":\r\n order_by_method = \"DESC\";\r\n order_by = order_by[1:]\r\n else:\r\n order_by_method = \"ASC\";\r\n\r\n if select:\r\n if order_by and order_by in select:\r\n order_by_filtered = True\r\n query_parts.append(\"(%s)\" % select)\r\n\r\n if isinstance(filters, str) or isinstance(filters, unicode):\r\n query = \"WHERE %s AND `__type__` = '%s'\" % (filters, cls.__name__)\r\n if order_by in [\"__id__\", \"itemName()\"]:\r\n query += \" ORDER BY itemName() %s\" % order_by_method\r\n elif order_by != None:\r\n query += \" ORDER BY `%s` %s\" % (order_by, order_by_method)\r\n return query\r\n\r\n for filter in filters:\r\n filter_parts = []\r\n filter_props = filter[0]\r\n if type(filter_props) != list:\r\n filter_props = [filter_props]\r\n for filter_prop in filter_props:\r\n (name, op) = filter_prop.strip().split(\" \", 1)\r\n value = filter[1]\r\n property = cls.find_property(name)\r\n if name == order_by:\r\n order_by_filtered = True\r\n if types.TypeType(value) == types.ListType:\r\n filter_parts_sub = []\r\n for val in value:\r\n val = self.encode_value(property, val)\r\n if isinstance(val, list):\r\n for v in val:\r\n filter_parts_sub.append(self._build_filter(property, name, op, v))\r\n else:\r\n filter_parts_sub.append(self._build_filter(property, name, op, val))\r\n filter_parts.append(\"(%s)\" % (\" OR \".join(filter_parts_sub)))\r\n else:\r\n val = self.encode_value(property, value)\r\n if isinstance(val, list):\r\n for v in val:\r\n filter_parts.append(self._build_filter(property, name, op, v))\r\n else:\r\n filter_parts.append(self._build_filter(property, name, op, val))\r\n query_parts.append(\"(%s)\" % (\" or \".join(filter_parts)))\r\n\r\n\r\n type_query = \"(`__type__` = '%s'\" % cls.__name__\r\n for subclass in self._get_all_decendents(cls).keys():\r\n type_query += \" or `__type__` = '%s'\" % subclass\r\n type_query +=\")\"\r\n query_parts.append(type_query)\r\n\r\n order_by_query = \"\"\r\n\r\n if order_by:\r\n if not order_by_filtered:\r\n 
query_parts.append(\"`%s` LIKE '%%'\" % order_by)\r\n if order_by in [\"__id__\", \"itemName()\"]:\r\n order_by_query = \" ORDER BY itemName() %s\" % order_by_method\r\n else:\r\n order_by_query = \" ORDER BY `%s` %s\" % (order_by, order_by_method)\r\n\r\n if len(query_parts) > 0:\r\n return \"WHERE %s %s\" % (\" AND \".join(query_parts), order_by_query)\r\n else:\r\n return \"\"", "def __init__(self, filter_methods: ConfigNodePropertyArray=None, filter_enable_safe_user_agents: ConfigNodePropertyBoolean=None, filter_safe_user_agents: ConfigNodePropertyArray=None, filter_excluded_paths: ConfigNodePropertyArray=None): # noqa: E501\n self.openapi_types = {\n 'filter_methods': ConfigNodePropertyArray,\n 'filter_enable_safe_user_agents': ConfigNodePropertyBoolean,\n 'filter_safe_user_agents': ConfigNodePropertyArray,\n 'filter_excluded_paths': ConfigNodePropertyArray\n }\n\n self.attribute_map = {\n 'filter_methods': 'filter.methods',\n 'filter_enable_safe_user_agents': 'filter.enable.safe.user.agents',\n 'filter_safe_user_agents': 'filter.safe.user.agents',\n 'filter_excluded_paths': 'filter.excluded.paths'\n }\n\n self._filter_methods = filter_methods\n self._filter_enable_safe_user_agents = filter_enable_safe_user_agents\n self._filter_safe_user_agents = filter_safe_user_agents\n self._filter_excluded_paths = filter_excluded_paths", "def _get_filters(self, req):\n filters = {}\n properties = {}\n\n for param in req.params:\n if param in SUPPORTED_FILTERS:\n filters[param] = req.params.get(param)\n if param.startswith('property-'):\n _param = param[9:]\n properties[_param] = req.params.get(param)\n\n if 'changes-since' in filters:\n isotime = filters['changes-since']\n try:\n filters['changes-since'] = timeutils.parse_isotime(isotime)\n except ValueError:\n raise exc.HTTPBadRequest(_(\"Unrecognized changes-since value\"))\n\n if 'protected' in filters:\n value = self._get_bool(filters['protected'])\n if value is None:\n raise exc.HTTPBadRequest(_(\"protected must be True, or \"\n \"False\"))\n\n filters['protected'] = value\n\n # only allow admins to filter on 'deleted'\n if req.context.is_admin:\n deleted_filter = self._parse_deleted_filter(req)\n if deleted_filter is not None:\n filters['deleted'] = deleted_filter\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n\n if properties:\n filters['properties'] = properties\n\n return filters", "def _build_filter(self, **kwargs):\n\n def object_filter(obj):\n for key, value in kwargs.items():\n # we replace dango-like lookup by dots, so attrgetter can do his job\n\n getter = utils.attrgetter(key)\n if hasattr(value, '__call__'):\n # User passed a callable for a custom comparison\n if not value(getter(obj)):\n return False\n else:\n if not getter(obj) == value:\n return False\n return True\n\n return object_filter", "def build_filters(self, filters=None):\n if filters is None:\n filters = {}\n\n grouped = get_grouped_filters(filters)\n branch_filters = get_branch_filter(filters)\n orm_filters = super(StoryResource, self).build_filters(filters)\n orm_filters['grouped'] = grouped\n orm_filters['br_filter'] = branch_filters\n\n if 'content_type__in' in filters:\n orm_filters['content_type__in'] = [CONTENT_HYDRATE[f] for f in filters['content_type__in'].split(',')]\n\n return orm_filters", "def _generate_stats(self, host_state, filter_properties):\n\n filter_function = None\n\n if ('filter_function' in host_state.capabilities and\n 
host_state.capabilities['filter_function'] is not None):\n filter_function = str(\n host_state.capabilities['filter_function'])\n\n stats = utils.generate_stats(host_state, filter_properties)\n\n stats['filter_function'] = filter_function\n\n return stats", "def get_filters(self):", "def properties(self, filters={}):\n return self.__get_list_client(Property)(filters=filters)", "def test_fields_from_property():\n prop_template = PropertyTemplate(name=\"cookie eating template\", bounds=IntegerBounds(0, 1000))\n cond_template = ConditionTemplate(name=\"Hunger template\",\n bounds=CategoricalBounds([\"hungry\", \"full\", \"peckish\"]))\n prop = Property(name=\"number of cookies eaten\",\n template=prop_template,\n origin='measured',\n value=NominalInteger(27))\n cond = Condition(name=\"hunger level\",\n template=cond_template,\n origin='specified',\n value=NominalCategorical(\"hungry\"))\n\n prop_and_conds = PropertyAndConditions(property=prop, conditions=[cond])\n assert prop_and_conds.name == prop.name\n assert prop_and_conds.template == prop.template\n assert prop_and_conds.origin == prop.origin\n assert prop_and_conds.value == prop.value", "def build_query_filters(**kwargs):\n queryTarget = kwargs.get(\"queryTarget\", None)\n targetSubtreeClass = kwargs.get(\"targetSubtreeClass\", None)\n queryTargetFilter = kwargs.get(\"queryTargetFilter\", None)\n rspSubtree = kwargs.get(\"rspSubtree\", None)\n rspSubtreeInclude = kwargs.get(\"rspSubtreeInclude\", None)\n rspPropInclude = kwargs.get(\"rspPropInclude\", None)\n opts = \"\"\n if queryTarget is not None:\n opts+= \"&query-target=%s\" % queryTarget\n if targetSubtreeClass is not None:\n opts+= \"&target-subtree-class=%s\" % targetSubtreeClass\n if queryTargetFilter is not None:\n opts+= \"&query-target-filter=%s\" % queryTargetFilter\n if rspSubtree is not None:\n opts+= \"&rsp-subtree=%s\" % rspSubtree\n if rspSubtreeInclude is not None:\n opts+= \"&rsp-subtree-include=%s\" % rspSubtreeInclude\n if rspPropInclude is not None:\n opts+= \"&rsp-prop-include=%s\" % rspPropInclude\n\n if len(opts)>0: opts = \"?%s\" % opts.strip(\"&\")\n return opts", "def __getSelectionFilter(self):\n \n selectionPairs = []\n selectionPairs.append(('field','field'))\n selectionPairs.append(('spw','spw'))\n selectionPairs.append(('polarization','correlation'))\n selectionPairs.append(('baseline','antenna'))\n selectionPairs.append(('time','timerange'))\n selectionPairs.append(('scan','scan'))\n selectionPairs.append(('uvdist','uvrange'))\n selectionPairs.append(('scanintent','intent'))\n selectionPairs.append(('observation','observation'))\n return self.__generateFilter(selectionPairs)", "def _propertyFilter(self, entity, params):\n\n if 'property_conditions' not in params:\n raise ProtocolError()\n\n conditions = params['property_conditions']\n\n for field, allowed_values in conditions.iteritems():\n if entity.__getattribute__(field) not in allowed_values:\n return False\n\n return True", "def build_feature_filter(self):\n if self.features == [\"*\"]:\n random_iso = list(self.data.keys())[0]\n self.features = set()\n for lang_features in self.data.values():\n self.features |= set(lang_features.keys())\n self.features = list(self.features)\n if self.exclusions:\n self.features = [f for f in self.features if f not in self.exclusions]\n self.feature_filter = set(self.features)", "def __init__(self, filter_spec = [ [{},False] ]):\n\n Qt.QObject.__init__(self)\n\n\n # key = property name of Element object\n # value = displayed column name for tables showing choices 
and matches\n self.elem_property_vs_col_name = \\\n {'name':'Name', 'devname':'Dev. Name', 'cell':'Cell',\n 'family':'Family', 'girder':'Girder', 'group':'Group',\n 'index':'Lat. Index', 'length':'Eff.Len', 'phylen':'Phys. Len.',\n 'pv':'PV', 'sb':'sb', 'se':'se', 'symmetry':'Symmetry',\n 'virtual':'Virtual', 'sequence':'Sequence'}\n\n # key = property name of Element object & exclusion flag\n # value = displayed column name for table showing filters\n self.filter_property_vs_col_name = \\\n self.elem_property_vs_col_name.copy()\n self.filter_property_vs_col_name.update({'exclude':'Excl.'}) # adding extra column\n\n # Specify the default column order you want for tables showing\n # choices and matches.\n self.elem_property_list = ['family', 'name', 'devname', 'cell',\n 'girder', 'symmetry', 'group', 'virtual',\n 'sb', 'se', 'pv', 'length', 'phylen',\n 'index', 'sequence']\n self.col_name_list = [self.elem_property_vs_col_name[prop]\n for prop in self.elem_property_list]\n self.choice_dict = dict.fromkeys(self.elem_property_list)\n\n # Specify the default column order you want for table showing\n # filters.\n self.filter_property_list = self.elem_property_list[:]\n self.filter_property_list.insert(0, 'exclude')\n self.filter_col_name_list = [self.filter_property_vs_col_name[prop]\n for prop in self.filter_property_list]\n self.filter_dict = dict.fromkeys(self.filter_property_list)\n\n self.numeric_filter_list = ['index', 'phylen', 'length', 'sb', 'se']\n self.not_implemented_filter_list = ['sequence']\n\n self.filter_spec = filter_spec\n\n self.allElements = ap.getElements('*')\n\n # Initialization of matching data information\n self.matched = [ [True]*len(self.allElements) ]\n self.combine_matched_list()\n self.update_choice_dict()\n\n # Apply initial filters provided by a user, if any.\n if self.filter_spec:\n isCaseSensitive = False\n self.filterData(range(len(self.filter_spec)), isCaseSensitive)\n\n self.selectedElements = []", "def build_filters(self, filters = None):\n if filters is None:\n filters = {}\n \n orm_filters = super(AreaResource, self).build_filters(filters)\n \n if \"level\" in filters:\n orm_filters[\"layout__level\"] = int(filters[\"level\"])\n \n return orm_filters", "def _write_filter_params(self, spec):\n spec.switch_write_focus(self.REGIONS.FILTER_PARAMS.value)\n for param in self._filter_params:\n spec.write_value(param, data_type=DataType.FLOAT_64)", "def create_filter_from_args(self, args: dict) -> Filter:\n keys = set(args.keys())\n filter_args = {}\n\n if \"name\" in keys:\n value = args.get('name')\n if value != \"\":\n filter_args.update({\"text_filter\": args.get('name')})\n if \"product_in\" in keys:\n value = args.get('product_in')\n if value != \"\":\n filter_args.update({\"product_in\": 'true' if value == \"yes\" else 'false'})\n if \"human_in\" in keys:\n value = args.get('human_in')\n if value != \"\":\n filter_args.update({\"human_in\": 'true' if value == \"yes\" else 'false'})\n if \"institutional\" in keys:\n value = args.get('institutional')\n if value != \"\":\n filter_args.update({\"institutional\": 'true' if value == \"yes\" else 'false'})\n if \"format\" in keys:\n value = args.get('format')\n if value != \"\":\n filter_args.update({\"picture_format\": 'true' if value == \"vertical\" else 'false'})\n if \"credit\" in keys:\n value = args.get('credit')\n if value != \"\":\n filter_args.update({\"author_credits\": value})\n if \"limited_use\" in keys:\n value = args.get('limited_use')\n if value != \"\":\n filter_args.update({\"limited_usage\": 
'true' if value == \"yes\" else 'false'})\n if \"tags\" in keys:\n value = args.get('tags')\n if value != \"\":\n filter_args.update({\"limited_usage\": value.split(';')})\n\n f = Filter(**filter_args)\n return f", "def _set_filters(self, options):\n if options.keywords:\n self.filters[\"keywords\"] = string_to_list(options.keywords)\n if options.features:\n self.filters[\"features\"] = string_to_list(options.features)\n if options.authors:\n self.filters[\"authors\"] = string_to_list(options.authors)\n if options.version:\n self.filters[\"version\"] = options.version", "def _component_specs(self):\n specs = dict(pretransformed_input=self._input_spec)\n if self._transform_is_composite:\n specs['transform_fn'] = self.transform_or_spec\n if self._also_track_spec is not None:\n specs['also_track'] = self._also_track_spec\n return specs", "def _init_optimizer_params(self):\n order = [\n [Peaking.__name__, True, True], # Peaking\n [LowShelf.__name__, True, True], # Low shelfs\n [HighShelf.__name__, True, True], # High shelfs\n [Peaking.__name__, True, False], # Peaking with fixed q\n [LowShelf.__name__, True, False], # Low shelfs with fixed q\n [HighShelf.__name__, True, False], # High shelfs with fixed q\n [Peaking.__name__, False, True], # Peaking with fixed fc\n [LowShelf.__name__, False, True], # Low shelfs with fixed fc\n [HighShelf.__name__, False, True], # High shelfs with fixed fc\n [Peaking.__name__, False, False], # Peaking with fixed fc and q\n [LowShelf.__name__, False, False], # Low shelfs with fixed fc and q\n [HighShelf.__name__, False, False], # High shelfs with fixed fc and q\n ]\n\n def init_order(filter_ix):\n filt = self.filters[filter_ix]\n ix = order.index([filt.__class__.__name__, filt.optimize_fc, filt.optimize_q])\n val = ix * 100\n if filt.optimize_fc:\n val += 1 / np.log2(filt.max_fc / filt.min_fc)\n return val\n\n # Initialize filter params as list of empty lists, one per filter\n filter_params = [[]] * len(self.filters)\n # Indexes to self.filters sorted by filter init order\n filter_argsort = sorted(list(range(len(self.filters))), key=init_order, reverse=True)\n remaining_target = self.target.copy()\n for ix in filter_argsort: # Iterate sorted filter indexes\n filt = self.filters[ix] # Get filter\n filter_params[ix] = filt.init(remaining_target) # Init filter and place params to list of lists\n remaining_target -= filt.fr # Adjust target\n filter_params = np.concatenate(filter_params).flatten() # Flatten params list\n return filter_params" ]
[ "0.76403946", "0.65880555", "0.6298685", "0.6076335", "0.597899", "0.5965586", "0.5764983", "0.5655848", "0.56413084", "0.5557564", "0.5519107", "0.5488389", "0.54726166", "0.5452801", "0.544979", "0.5363146", "0.5314718", "0.51662815", "0.5132564", "0.5092276", "0.50902605", "0.5073957", "0.50721383", "0.5053869", "0.50466317", "0.5040117", "0.50374806", "0.50288486", "0.5009367", "0.5004019" ]
0.66637796
1
Gets the list of properties for the collection of objects of the type specified.
def get_properties_for_a_collection_of_objects(vim, type, obj_list, properties):
    client_factory = vim.client.factory
    if len(obj_list) == 0:
        return []
    prop_spec = get_prop_spec(client_factory, type, properties)
    lst_obj_specs = []
    for obj in obj_list:
        lst_obj_specs.append(get_obj_spec(client_factory, obj))
    prop_filter_spec = get_prop_filter_spec(client_factory, lst_obj_specs, [prop_spec])
    return vim.RetrieveProperties(vim.get_service_content().propertyCollector, specSet=[prop_filter_spec])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_properties_for_a_collection_of_objects(vim, type,\n obj_list, properties):\n client_factory = vim.client.factory\n if len(obj_list) == 0:\n return []\n prop_spec = get_prop_spec(client_factory, type, properties)\n lst_obj_specs = []\n for obj in obj_list:\n lst_obj_specs.append(get_obj_spec(client_factory, obj))\n prop_filter_spec = get_prop_filter_spec(client_factory,\n lst_obj_specs, [prop_spec])\n return retrieve_properties_ex(vim,\n vim.service_content.propertyCollector,\n [prop_filter_spec])", "def get_all_properties(cls):\n return ['key', 'id'] + _.keys(cls._properties)", "def get_objects(vim, type, properties_to_collect=[\"name\"], all=False):\r\n client_factory = vim.client.factory\r\n object_spec = build_object_spec(client_factory,\r\n vim.get_service_content().rootFolder,\r\n [build_recursive_traversal_spec(client_factory)])\r\n property_spec = build_property_spec(client_factory, type=type,\r\n properties_to_collect=properties_to_collect,\r\n all_properties=all)\r\n property_filter_spec = build_property_filter_spec(client_factory,\r\n [property_spec],\r\n [object_spec])\r\n return vim.RetrieveProperties(vim.get_service_content().propertyCollector,\r\n specSet=[property_filter_spec])", "def properties(self, filters={}):\n return self.__get_list_client(Property)(filters=filters)", "def supported_type_properties() -> List[TypeProperty]:\n types_props: List[TypeProperty] = []\n for det in PLACE_DETECTORS:\n types_props.extend(det.supported_types_and_properties())\n\n return types_props", "def collect_properties(service_instance, view_ref, obj_type, path_set=None,\n include_mors=False):\n collector = service_instance.content.propertyCollector\n\n # Create object specification to define the starting point of\n # inventory navigation\n obj_spec = vmodl.query.PropertyCollector.ObjectSpec()\n obj_spec.obj = view_ref\n obj_spec.skip = True\n\n # Create a traversal specification to identify the path for collection\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec()\n traversal_spec.name = 'traverseEntities'\n traversal_spec.path = 'view'\n traversal_spec.skip = False\n traversal_spec.type = view_ref.__class__\n obj_spec.selectSet = [traversal_spec]\n\n # Identify the properties to the retrieved\n property_spec = vmodl.query.PropertyCollector.PropertySpec()\n property_spec.type = obj_type\n\n if not path_set:\n property_spec.all = True\n\n property_spec.pathSet = path_set\n\n # Add the object and property specification to the\n # property filter specification\n filter_spec = vmodl.query.PropertyCollector.FilterSpec()\n filter_spec.objectSet = [obj_spec]\n filter_spec.propSet = [property_spec]\n\n # Retrieve properties\n props = collector.RetrieveContents([filter_spec])\n\n data = []\n for obj in props:\n properties = {}\n for prop in obj.propSet:\n properties[prop.name] = prop.val\n\n if include_mors:\n properties['obj'] = obj.obj\n\n data.append(properties)\n return data", "def _PropList(self):\n prop_list = []\n\n if self.HASH_PROPERTIES is None and self.HASH_EXCLUDE is None:\n return prop_list\n\n # TODO(ckl): comprehensive list of \"internal\" properties\n exclude_list = self.HASH_EXCLUDE or tuple()\n exclude_list += metadata_api.GetFieldNames(self, ui_readonly=True)\n # TODO(raulg): The deleted can be removed from the exclude_list after all\n # records have been purged of deleted fields.\n exclude_list += ('deleted', 'key_subtype', 'key_order', 'key_name')\n\n for prop in self._properties:\n if '__' in prop and not prop.endswith('key_name'):\n continue\n if 
self.HASH_PROPERTIES is not None and prop not in self.HASH_PROPERTIES:\n continue\n if self.HASH_EXCLUDE is not None and prop in exclude_list:\n continue\n prop_list.append(prop)\n\n prop_list.sort()\n return prop_list", "def list_all_properties(self):\n properties = list(self.property_only_graph.nodes())\n properties = [SchemaProperty(_prop, self) for _prop in properties]\n return properties", "def get_objects(vim, type, properties_to_collect=None, all=False):\n if not properties_to_collect:\n properties_to_collect = [\"name\"]\n\n client_factory = vim.client.factory\n trav_spec = vim_util.build_recursive_traversal_spec(client_factory)\n object_spec = vim_util.build_object_spec(client_factory,\n vim.service_content.rootFolder,\n [trav_spec])\n property_spec = vim_util.build_property_spec(\n client_factory, type_=type,\n properties_to_collect=properties_to_collect,\n all_properties=all)\n property_filter_spec = vim_util.build_property_filter_spec(client_factory,\n [property_spec],\n [object_spec])\n property_collector = vim.service_content.propertyCollector\n return retrieve_properties_ex(vim,\n property_collector,\n [property_filter_spec])", "def getPropertiesAll():", "def _tp__get_typed_properties(self):\n try:\n return tuple(getattr(self, p) for p in self._tp__typed_properties)\n except AttributeError:\n raise NotImplementedError", "def get_object_properties(vim, collector, mobj, type, properties):\r\n client_factory = vim.client.factory\r\n if mobj is None:\r\n return None\r\n usecoll = collector\r\n if usecoll is None:\r\n usecoll = vim.get_service_content().propertyCollector\r\n property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\r\n property_spec = client_factory.create('ns0:PropertySpec')\r\n property_spec.all = (properties is None or len(properties) == 0)\r\n property_spec.pathSet = properties\r\n property_spec.type = type\r\n object_spec = client_factory.create('ns0:ObjectSpec')\r\n object_spec.obj = mobj\r\n object_spec.skip = False\r\n property_filter_spec.propSet = [property_spec]\r\n property_filter_spec.objectSet = [object_spec]\r\n return vim.RetrieveProperties(usecoll, specSet=[property_filter_spec])", "def list_property(\n self, key: str) -> Collection[Tuple[str, PropertyAttribute]]:\n return self._env.list_property(key)", "def get_properties(self):\n return self.properties", "def iterProperties(cls):\n meta = cls.staticMetaObject\n for i in range(meta.propertyCount()):\n yield meta.property(i).name()", "def ListPropertyValuesOfType(res_dict, prop, res_type):\n return [r['properties'][prop] for r in res_dict if r['type'] == res_type]", "def getProperties(self, owner: unicode) -> List[ghidra.program.model.util.PropertyMap]:\n ...", "def properties(self) -> List[TaskPropertyModel]:\n return self._properties", "def get_object_properties(vim, collector, mobj, type, properties):\n client_factory = vim.client.factory\n if mobj is None:\n return None\n usecoll = collector\n if usecoll is None:\n usecoll = vim.service_content.propertyCollector\n property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\n property_spec = client_factory.create('ns0:PropertySpec')\n property_spec.all = (properties is None or len(properties) == 0)\n property_spec.pathSet = properties\n property_spec.type = type\n object_spec = client_factory.create('ns0:ObjectSpec')\n object_spec.obj = mobj\n object_spec.skip = False\n property_filter_spec.propSet = [property_spec]\n property_filter_spec.objectSet = [object_spec]\n return retrieve_properties_ex(vim,\n usecoll,\n 
[property_filter_spec])", "def get_properties():\n properties = dict()\n properties['size'] = list()\n properties['color'] = list()\n properties['quality'] = list()\n u = models.Size.query.all()\n for i in u:\n properties['size'].append(i.size_name)\n u = models.Color.query.all()\n for i in u:\n properties['color'].append(i.color_name)\n u = models.Quality.query.all()\n for i in u:\n properties['quality'].append(i.quality_name)\n return make_response(jsonify(properties))", "def bson_properties(self):\n return []", "def getProperties(self):\n return self.properties", "def get_properties(self) -> List[ObserverPropertiesItem]:\n return [\n self._prop_builder.auto('Seed', type(self).seed),\n self._prop_builder.auto('Class filter', type(self).class_filter),\n self._prop_builder.auto('Random order', type(self).random_order),\n self._prop_builder.auto('Save gpu memory', type(self).save_gpu_memory),\n self._prop_builder.auto('Location filter ration', type(self).location_filter_ratio),\n self._prop_builder.auto('Dataset size', type(self).dataset_size),\n self._prop_builder.auto('Dataset config', type(self).dataset_config),\n self._prop_builder.auto('Switch training resets train pos ', type(self).switch_train_resets_train_pos),\n self._prop_builder.auto('Hide labels', type(self).is_hide_labels)\n ]", "def get_properties(self):\n return self.properties", "def properties(self):\n return self._properties", "def properties(self):\n return self._properties", "def getProperties(self, prop_colour):\n props = database_creator.db.query(\n \"SELECT name FROM main_property_deck WHERE property_colour = :prop_colour\", prop_colour=prop_colour)\n properties = []\n for i in props:\n properties.append(i[\"name\"])\n return properties", "def get_properties():", "def get_instance_properties(self):\n return [p for p in self.session.query(self.Property).all() \\\n if p.is_simple() and p.is_instance_property()]", "def properties(self):\n return self._props" ]
[ "0.7357272", "0.65244734", "0.6457637", "0.6373247", "0.6370086", "0.6297487", "0.62510055", "0.62469465", "0.6221552", "0.6219856", "0.6206821", "0.610221", "0.60433024", "0.60395074", "0.5994236", "0.59763896", "0.59744734", "0.59715617", "0.59705645", "0.5961555", "0.5959065", "0.59303087", "0.5918242", "0.59056026", "0.58923954", "0.58923954", "0.5886471", "0.584636", "0.58417064", "0.5829882" ]
0.74611396
0
Run `code` with profiler. Used by ``%prun`` and ``%run -p``.
def _run_with_profiler(self, code, opts, namespace):
    # Fill default values for unspecified options:
    opts.merge(Struct(D=[''], l=[], s=['time'], T=['']))

    prof = profile.Profile()
    try:
        prof = prof.runctx(code, namespace, namespace)
        sys_exit = ''
    except SystemExit:
        sys_exit = """*** SystemExit exception caught in code being profiled."""

    stats = pstats.Stats(prof).strip_dirs().sort_stats(*opts.s)

    lims = opts.l
    if lims:
        lims = []  # rebuild lims with ints/floats/strings
        for lim in opts.l:
            try:
                lims.append(int(lim))
            except ValueError:
                try:
                    lims.append(float(lim))
                except ValueError:
                    lims.append(lim)

    # Trap output.
    stdout_trap = StringIO()
    stats_stream = stats.stream
    try:
        stats.stream = stdout_trap
        stats.print_stats(*lims)
    finally:
        stats.stream = stats_stream

    output = stdout_trap.getvalue()
    output = output.rstrip()

    if 'q' not in opts:
        page.page(output)
    print(sys_exit, end=' ')

    dump_file = opts.D[0]
    text_file = opts.T[0]
    if dump_file:
        prof.dump_stats(dump_file)
    if text_file:
        with open(text_file, 'w') as pfile:
            pfile.write(output)

    if 'r' in opts:
        return stats
    else:
        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def profile_code(profiler):\n print('\\n')\n ps = pstats.Stats(profiler).strip_dirs().sort_stats('cumulative')\n ps.print_stats(10)", "def runner(code, out_stream):\n code_obj = compiler.compile_source(code)\n vm = virtual_machine.VirtualMachine(out_stream)\n vm.run_code(code_obj)", "def part_1(code: List):\n acc, _ = run_code(code)\n\n return acc", "def profile(script, argv, timer, pickle_protocol, dump_filename, mono):\n filename, code, globals_ = script\n sys.argv[:] = [filename] + list(argv)\n __profile__(filename, code, globals_,\n timer=timer, pickle_protocol=pickle_protocol,\n dump_filename=dump_filename, mono=mono)", "def run_code():\n\n output = None\n code = request.json['code']\n\n cmd = 'python -c \"' + code +'\"'\n p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE,\n stderr=STDOUT, close_fds=True)\n output = p.stdout.read()\n\n return jsonify(output.decode('utf-8'))", "def run(self, code='', file=''):\n if file and code:\n print('WARNING: reading file instead of the code')\n\n if file:\n source = Path(file)\n if source.exists():\n if not source.is_file():\n self.__abort(ERR_CODE_NOT_FILE)\n if file[len(file) - 3:] != EXTENSION:\n self.__abort(ERR_CODE_NOT_SOURCE)\n with source.open() as f:\n self.__code = f.read()\n else:\n self.__abort(ERR_CODE_FILE_MISSING)\n else:\n self.__code = code\n\n self.__tokenize()\n return self.__execute()", "def run_monitored_proc(code):\n if not sys.platform.startswith('linux'):\n raise RuntimeError(\"Peak memory monitoring only works on Linux\")\n\n code = textwrap.dedent(code)\n process = subprocess.Popen([sys.executable, '-c', code])\n\n peak_memusage = -1\n\n start = time.time()\n while True:\n ret = process.poll()\n if ret is not None:\n break\n\n with open('/proc/%d/status' % process.pid, 'r') as f:\n procdata = f.read()\n\n m = re.search(r'VmRSS:\\s*(\\d+)\\s*kB', procdata, re.S | re.I)\n if m is not None:\n memusage = float(m.group(1)) * 1e3\n peak_memusage = max(memusage, peak_memusage)\n\n time.sleep(0.01)\n\n process.wait()\n\n duration = time.time() - start\n\n if process.returncode != 0:\n raise AssertionError(\"Running failed:\\n%s\" % code)\n\n return duration, peak_memusage", "def run_code(self, code: str, with_preprocess: bool = False,\n exception_list: Tuple = (), *args, **kwargs):\n # Get the path to the configuration file\n all_codes = get_all_codes(self.all_cfgs_dir)\n cfg_path = all_codes[code]\n # Run the experiment\n runner = self.get_runner()\n runner.merge_cfg(cfg_path)\n # Setup the outputs\n current_experiment_output_dir = os.path.join(self.hyper_experiment_path, f'exp-{code}')\n if not os.path.exists(current_experiment_output_dir):\n os.mkdir(current_experiment_output_dir)\n runner.set_output_dir(current_experiment_output_dir)\n\n # Run the experiment\n if self.verbose > 0:\n print(\"---\")\n print(\"This the configuration that will be used:\")\n print(runner.cfg)\n print(\"---\")\n runner.verbose = max(0, self.verbose - 1)\n try:\n if with_preprocess:\n runner.preprocess()\n score = runner.run(*args, **kwargs)\n except exception_list as e:\n warnings.warn(f\"Exception caught {e}\")\n score = None\n self.CACHE.LOAD()\n score_dict = self.CACHE.SET_IFN('score_dict', {})\n score_dict[code] = score\n self.CACHE.SET('score_dict', score_dict)\n self.CACHE.SAVE()\n runner.CACHE.RESET(prompt=False)\n return score", "def code():", "def run_code(code: List) -> Tuple[int, int]:\n executed_lines = set()\n\n prv_ptr, ins_ptr, acc = -1, 0, 0\n\n while True:\n if ins_ptr in executed_lines:\n break\n\n 
executed_lines.add(ins_ptr)\n\n cmd, args = code[ins_ptr]\n\n if cmd == \"acc\":\n acc += int(args)\n\n elif cmd == \"nop\":\n pass\n\n elif cmd == \"jmp\":\n prv_ptr = ins_ptr\n ins_ptr += int(args)\n continue\n\n prv_ptr = ins_ptr\n ins_ptr += 1\n\n else:\n # No loop detected\n return acc, -1\n\n return acc, ins_ptr", "def runcode(self, code):\n if not self.locals.get('autocommit', None):\n return self.locals['db'].transact(code.InteractiveConsole.runcode, self, code)\n return code.InteractiveConsole.runcode(self, code)", "def run_code(plot_path, function_name, plot_code):\r\n # Change the working directory to the directory of the example, so\r\n # it can get at its data files, if any. Add its path to sys.path\r\n # so it can import any helper modules sitting beside it.\r\n if plot_code is not None:\r\n exec(plot_code)\r\n else:\r\n pwd = os.getcwd()\r\n path, fname = os.path.split(plot_path)\r\n sys.path.insert(0, os.path.abspath(path))\r\n stdout = sys.stdout\r\n sys.stdout = cStringIO.StringIO()\r\n os.chdir(path)\r\n fd = None\r\n try:\r\n fd = open(fname)\r\n module = imp.load_module(\r\n \"__plot__\", fd, fname, ('py', 'r', imp.PY_SOURCE))\r\n finally:\r\n del sys.path[0]\r\n os.chdir(pwd)\r\n sys.stdout = stdout\r\n if fd is not None:\r\n fd.close()\r\n\r\n if function_name is not None:\r\n getattr(module, function_name)()", "def exec_code(code, db, write=True):\n evaler = Evaluator(db, write=write)\n glb = {}\n loc = ExecutionContext(evaler=evaler)\n exec(code, glb, loc)", "def RunScript(code):\n with ScriptContext() as script_module:\n try:\n exec code in script_module.__dict__\n except:\n # Get exception output as close to exec as possible.\n # We don't take the first entry in the traceback because it just contains\n # \"exec\". Everything after that is the submitted code.\n try:\n etype, evalue, tb = sys.exc_info()\n traceback.print_exception(etype,\n evalue,\n tb.tb_next, # one frame up\n file=sys.stderr)\n finally:\n del tb # break circular references when using exc_info\n\n return sys.stdout.getvalue(), sys.stderr.getvalue()", "def timeit_profile(stmt, number, repeat, setup,\n timer, pickle_protocol, dump_filename, mono, **_ignored):\n del _ignored\n sys.path.insert(0, os.curdir)\n globals_ = {}\n exec_(setup, globals_)\n if number is None:\n # determine number so that 0.2 <= total time < 2.0 like timeit.\n dummy_profiler = Profiler()\n dummy_profiler.start()\n for x in range(1, 10):\n number = 10 ** x\n t = time.time()\n for y in range(number):\n exec_(stmt, globals_)\n if time.time() - t >= 0.2:\n break\n dummy_profiler.stop()\n del dummy_profiler\n code = compile('for _ in range(%d): %s' % (number, stmt),\n 'STATEMENT', 'exec')\n __profile__(stmt, code, globals_,\n timer=timer, pickle_protocol=pickle_protocol,\n dump_filename=dump_filename, mono=mono)", "async def cli(self, code, *m):\n if self.bot.check_code(code):\n p = subprocess.run(args=m, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n await self.bot.say(codify(p.stdout.decode('utf-8'), p.stderr.decode('utf-8'), language='DOS'))\n else:\n await self.bot.reply('Bad code!')", "def runcode(self,code_obj):\n\n # Set our own excepthook in case the user code tries to call it\n # directly, so that the IPython crash handler doesn't get triggered\n old_excepthook,sys.excepthook = sys.excepthook, self.excepthook\n outflag = 1 # happens in more places, so it's easier as default\n try:\n try:\n exec code_obj in self.locals\n finally:\n # Reset our crash handler in place\n sys.excepthook = old_excepthook\n except 
SystemExit:\n self.resetbuffer()\n self.showtraceback()\n warn( __builtin__.exit,level=1)\n except self.custom_exceptions:\n etype,value,tb = sys.exc_info()\n self.CustomTB(etype,value,tb)\n except:\n self.showtraceback()\n else:\n outflag = 0\n if code.softspace(sys.stdout, 0):\n print\n # Flush out code object which has been run (and source)\n self.code_to_run = None\n self.code_to_run_src = ''\n return outflag", "def show_code(code):\n\n print('The code was: '+str(code))", "def run(self):\n if not self.__class__.profile is None:\n import cProfile\n cminstance = self\n cProfile.runctx('self._run()', globals(), locals(), self.__class__.profile)\n else:\n self._run()", "def _score_code(self, code):\n # Get list of 2-tuples, each containing an input sequence and an output\n # sequence.\n io_seqs = self.task.make_io_set()\n terminal_reward = 0.0\n results = []\n reason = 'correct'\n for input_seq, output_seq in io_seqs:\n eval_result = bf.evaluate(\n code, input_buffer=input_seq, timeout=0.1,\n max_steps=self.max_execution_steps,\n base=self.task.base,\n require_correct_syntax=self.require_correct_syntax)\n result, success = eval_result.output, eval_result.success\n if not success:\n # Code execution timed out.\n terminal_reward = self.failure_reward\n results = []\n reason = eval_result.failure_reason\n break\n else:\n terminal_reward += self.reward_fn(result, output_seq, self.task.base)\n if result == output_seq:\n terminal_reward += self.correct_bonus # Bonus for correct answer.\n\n # Only add additional reward for shorter code. Subtracting reward\n # interferes with the main objective. Only optimize for length once\n # any solution is found.\n if self.min_code_length == self.max_code_length:\n terminal_reward += self.code_length_bonus\n else:\n terminal_reward += self.code_length_bonus * clipped_linear(\n x=len(code), x0=self.min_code_length, y0=1.0,\n slope=-self.time_penalty, y_range=(0.0, 1.0))\n\n # reason remains 'correct' if it is already\n elif reason == 'correct':\n reason = 'wrong'\n results.append(result)\n\n # Return list of rewards, one for each char in the code. 
All are 0 except\n # for the terminal reward.\n terminal_reward /= self.best_reward\n return misc.RewardInfo(\n episode_rewards=[0.0] * (len(code) - 1) + [terminal_reward],\n input_case=misc.IOTuple(i for i, o in io_seqs),\n correct_output=misc.IOTuple(o for i, o in io_seqs),\n code_output=misc.IOTuple(results),\n input_type=self.input_type,\n output_type=self.output_type,\n reason=reason)", "def runSync(code):\n __PyMainThread__.runSync(code)\n sleep(0.1)", "def run_monitored(code):\n\n if hasattr(os, 'wait4'):\n return run_monitored_wait4(code)\n else:\n return run_monitored_proc(code)", "def profile_function(self):\n with _CodeHeatmapCalculator() as prof:\n result = self._run_object(*self._run_args, **self._run_kwargs)\n code_lines, start_line = inspect.getsourcelines(self._run_object)\n\n source_lines = []\n for line in code_lines:\n source_lines.append(('line', start_line, line))\n start_line += 1\n\n filename = os.path.abspath(inspect.getsourcefile(self._run_object))\n heatmap = prof.heatmap[filename]\n run_time = sum(time for time in heatmap.values())\n return {\n 'objectName': self._object_name,\n 'runTime': run_time,\n 'result': result,\n 'timestamp': int(time.time()),\n 'heatmaps': [{\n 'name': self._object_name,\n 'heatmap': heatmap,\n 'executionCount': prof.execution_count[filename],\n 'srcCode': source_lines,\n 'runTime': run_time\n }]\n }", "def execute(self, code):\n code = code()\n\n # Build an AST tree from the Python code, to get the line number of each statement\n try:\n nodes = compiler.parse(code).getChildNodes()[0].getChildNodes()\n lines = [node.lineno - 1 for node in nodes]\n except:\n self.executions += '>>> ' + code + '\\n' + ''.join(traceback.format_exception(*sys.exc_info())[4:])\n return\n\n code = code.splitlines()\n\n with IDEFrameContext.exec_lock:\n stdout = sys.stdout\n\n try:\n # Iterate over all the statements\n for (a, b) in zip(lines, lines[1:] + [None]):\n sys.stdout = StringIO()\n\n source = code[a:b]\n\n try:\n # Execute the statement using this local and global context\n frame = self.get_frame()\n exec compile('\\n'.join(source), '<web>', 'single', 0, 1) in frame.f_locals, frame.f_globals\n except:\n print ''.join(traceback.format_exception(*sys.exc_info())[2:]).rstrip()\n\n self.executions += '\\n'.join([('... 
' if line.startswith(' ') else '>>> ') + line for line in source]) + '\\n' + sys.stdout.getvalue()\n finally:\n sys.stdout = stdout", "def go (fun, *args, **kwargs):\n if 'profile_filename' in kwargs:\n profile_filename = kwargs['profile_filename']\n del kwargs['profile_filename']\n else:\n profile_filename = '/tmp/coro_profile.bin'\n\n if 'profile_bench' in kwargs:\n profile_bench = kwargs['profile_bench']\n del kwargs['profile_bench']\n else:\n profile_bench = coro.rusage_bench\n\n p = coro.new_profiler (profile_bench)\n p.start()\n try:\n return fun (*args, **kwargs)\n finally:\n total_ticks = p.stop()\n user_ticks = _dump (p, profile_filename)", "def profile(f):\n def inner(*args, **kwargs):\n p = Profiler()\n result = p.runcall(f, *args, **kwargs)\n p.print_stats()\n return result\n return inner", "def cprofiler(fun, *args, **kwargs):\n print(f\"Profiling {fun.__name__}\")\n with cProfile.Profile() as pr:\n fun(*args, **kwargs)\n pr.print_stats()", "def main_code():\n pass", "def execute(self, code, environment = dict()):\r\n if not self.config.get('scripting', 'enable') and type(code) == str:\r\n self.send(code, log = False)\r\n else:\r\n if type(code) == str:\r\n c = compile(code, 'errors.log', 'exec')\r\n else:\r\n c = code\r\n eval(c, self.getEnvironment(environment))", "def runcode(self, codeobj):\n\n\t\ttry:\n\t\t\told_display_hook, sys.displayhook = sys.displayhook, self._displayhook\n\t\t\told_stdout, sys.stdout = sys.stdout, self.outputbuffer\n\t\t\told_stdin, sys.stdin = sys.stdin, None\n\t\t\texec codeobj in self.globals, self.locals #pylint: disable-msg=W0122\n\t\texcept SystemExit:\n\t\t\traise\n\t\texcept:\n\t\t\tself.showtraceback()\n\t\telse:\n\t\t\tif code.softspace(self.outputbuffer, 0):\n\t\t\t\tself.outputbuffer.write(\"\\n\")\n\t\tfinally:\n\t\t\tsys.displayhook = old_display_hook\n\t\t\tsys.stdout = old_stdout\n\t\t\tsys.stdin = old_stdin" ]
[ "0.6385464", "0.62743026", "0.6012042", "0.5954541", "0.5947372", "0.5926711", "0.5884469", "0.5836414", "0.5775137", "0.5737329", "0.5666656", "0.5654356", "0.55813533", "0.55803514", "0.555477", "0.55139744", "0.5504729", "0.5470353", "0.5440272", "0.5439059", "0.5430186", "0.54072833", "0.540433", "0.5382272", "0.53556293", "0.53267586", "0.5325591", "0.5316247", "0.53047544", "0.5296031" ]
0.7272947
0
read feature file, find out mass shift then correct
def feature_file_mass_correction(feature_filename: str):
    output_feature_filename = feature_filename + '.mass_corrected'
    ppm_shift = []
    with open(feature_filename, 'r') as f:
        reader = csv.reader(f, delimiter=',')
        header = next(reader)
        seq_index = header.index("seq")
        mz_index = header.index("m/z")
        z_index = header.index("z")
        for line in reader:
            mz = float(line[mz_index])
            z = float(line[z_index])
            observed_mass = mz * z - z * config.mass_H
            if not line[seq_index]:
                continue
            okay, peptide = parse_raw_sequence(line[seq_index])
            if not okay:  # unknown mods
                continue
            theoretical_mass = compute_neutral_peptide_mass(peptide)
            ppm = (observed_mass - theoretical_mass) / theoretical_mass * 1e6
            ppm_shift.append(ppm)
    if len(ppm_shift) < 100:
        raise ValueError("too less identified feature for mass correction")
    ppm_shift = np.median(ppm_shift)
    print(f"ppm shift: {ppm_shift}")
    with open(feature_filename, 'r') as fr:
        with open(output_feature_filename, 'w') as fw:
            reader = csv.reader(fr, delimiter=',')
            writer = csv.writer(fw, delimiter=',')
            writer.writerow(next(reader))
            for line in reader:
                mz = float(line[mz_index])
                mz = mz * (1 - ppm_shift * 1e-6)
                line[mz_index] = "{}".format(mz)
                writer.writerow(line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_msp(infile_name,feat_lim_file=\"\",\n\t\t\t sum_feats=False,selected_features=[],\n\t\t\t max_dist=275,step_size=0.005,feat_bins=[],\n\t\t\t top_peaks=50,windowed_mode=False):\n\n\tinfile = open(infile_name)\n\n\tif len(feat_lim_file) > 0:\n\t\tselected_features = [float(f.strip()) for f in open(feat_lim_file).readlines()]\n\t\t\n\tcounter = 0\n\ttemp_entry = []\n\tinstance_names = []\n\tnum_instances = num_instances_msp(infile_name)\n\t#print(num_instances)\n\n\tif len(feat_bins) == 0: feat_bins = np.arange(0,max_dist+step_size,step_size)\n\t\n\t#Initialize the feature matrix, must be lil since scr is slow when mutating values!\n\tfeat_matrix = scipy.sparse.lil_matrix((num_instances, len(feat_bins)),dtype=np.float32)\n\t\n\t#Iterate over the file and filter out single entries\n\tfor line in infile:\n\t\tif line.startswith(\"Name: \"):\n\t\t\tif len(temp_entry) == 0:\n\t\t\t\ttemp_entry.append(line.strip())\n\t\t\t\tcontinue\n\t\t\t#For this entry get identifier,m/z,intensities\n\t\t\tidentifier,mz_list,intensity_list = parse_msp(temp_entry,top=top_peaks,windowed_mode=windowed_mode)\n\t\t\tinstance_names.append(identifier)\n\t\t\t#Fill in the feature matrix\n\t\t\tfeat_matrix = get_feats(mz_list,intensity_list,feat_matrix,counter,feat_bins,allowed_c=selected_features,max_dist=max_dist)\n\t\t\t\n\t\t\t#Make sure the current line is still used for the next entry\n\t\t\ttemp_entry = [line]\n\t\t\t\n\t\t\t#print(counter)\n\t\t\tcounter += 1\n\t\t\t\n\t\ttemp_entry.append(line.strip())\n\t\n\t#If everything is empty; return\n\tif len(temp_entry) == 0:\n\t\ttemp_entry.append(line.strip())\n\t\treturn(feat_matrix.asformat(\"csr\"),feat_bins,instance_names,counter)\n\n\t#Analyse the last record; since we do not know when the spectra ends\n\tidentifier,mz_list,intensity_list = parse_msp(temp_entry,top=top_peaks,windowed_mode=windowed_mode)\n\tinstance_names.append(identifier)\n\tfeat_matrix = get_feats(mz_list,intensity_list,feat_matrix,counter,feat_bins,allowed_c=selected_features)\n\t\n\t#print(counter)\n\tcounter += 1\n\t\n\treturn(feat_matrix.asformat(\"csr\"),feat_bins,instance_names,counter)", "def read_mgf(infile_name,feat_lim_file=\"\",\n\t\t\t sum_feats=False,selected_features=[],\n\t\t\t max_dist=275,step_size=0.005,feat_bins=[],\n\t\t\t top_peaks=50):\t\t \n\t\n\tinfile = open(infile_name)\n\t\n\tif len(feat_lim_file) > 0:\n\t\tselected_features = [float(f.strip()) for f in open(\"selected_features.txt\").readlines()]\n\t\t\n\tcounter = 0\n\ttemp_entry = []\n\tinstance_names = []\n\tnum_instances = num_instances_mgf(infile_name)\n\t#print(num_instances)\n\n\tif len(feat_bins) == 0: feat_bins = np.arange(0,max_dist+step_size,step_size)\n\t\n\t#Initialize the feature matrix, must be lil since scr is slow when mutating values!\n\tfeat_matrix = scipy.sparse.lil_matrix((num_instances, len(feat_bins)),dtype=np.float32)\n\t\n\t#Iterate over the file and filter out single entries\n\tfor line in infile:\n\t\tif line.startswith(\"END IONS\"):\n\t\t\t#For this entry get identifier,m/z,intensities\n\t\t\tidentifier,mz_list,intensity_list = parse_mgf(temp_entry,top=top_peaks,windowed_mode=windowed_mode)\n\t\t\tinstance_names.append(identifier)\n\t\t\t#Fill in the feature matrix\n\t\t\tfeat_matrix = get_feats(mz_list,intensity_list,feat_matrix,counter,feat_bins,allowed_c=selected_features,max_dist=max_dist)\n\t\t\tcounter += 1\n\t\t\t#print(counter)\n\t\t\ttemp_entry = []\n\t\t\tcontinue\n\t\tif line.startswith(\"BEGIN 
IONS\"):\n\t\t\tcontinue\n\t\ttemp_entry.append(line)\n\n\treturn(feat_matrix.asformat(\"csr\"),feat_bins,instance_names,counter)", "def read_szf_fmv_12(eps_file):\n data = {}\n metadata = {}\n\n n_lines = eps_file.mdr_counter\n n_node_per_line = eps_file.mdr[\"LONGITUDE_FULL\"].shape[1]\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n # extract metadata\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n metadata[\"state_vector_time\"] = datetime.strptime(\n eps_file.mphr[\"STATE_VECTOR_TIME\"][:-4], \"%Y%m%d%H%M%S\")\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n for f in fields:\n metadata[f] = np.int16(eps_file.mphr[f.upper()])\n\n # extract time\n dt = np.datetime64(\n \"2000-01-01\") + eps_file.mdr[\"UTC_LOCALISATION\"][\"day\"].astype(\n \"timedelta64[D]\"\n ) + eps_file.mdr[\"UTC_LOCALISATION\"][\"time\"].astype(\"timedelta64[ms]\")\n data[\"time\"] = dt[idx_nodes]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\", \"flagfield_rf1\", \"flagfield_rf2\", \"flagfield_pl\",\n \"flagfield_gen1\"\n ]\n\n # 101 min = 6082 seconds\n # state_vector_time = ascending node crossing time - 1520.5,\n # time crossing at -90 lat\n orbit_start_time = metadata[\"state_vector_time\"] - timedelta(\n seconds=1520.5)\n orbit_end_time = orbit_start_time + timedelta(seconds=6082)\n\n data[\"orbit_nr\"] = np.ma.zeros(\n data[\"time\"].size, dtype=np.int32,\n fill_value=int32_nan) + metadata[\"orbit_start\"]\n data[\"orbit_nr\"][data[\"time\"] > orbit_end_time] += 1\n\n metadata[\"orbits\"] = {}\n for orbit_nr in np.unique(data[\"orbit_nr\"]):\n if orbit_nr == metadata[\"orbit_start\"]:\n metadata[\"orbits\"][orbit_nr] = (orbit_start_time, orbit_end_time)\n else:\n metadata[\"orbits\"][orbit_nr] = (orbit_end_time, orbit_end_time +\n timedelta(seconds=6082))\n\n # extract data\n for f in fields:\n if eps_file.mdr_sfactor[f.upper()] == 1:\n data[f] = eps_file.mdr[f.upper()].flatten()[idx_nodes]\n else:\n data[f] = (eps_file.mdr[f.upper()].flatten() * 1. /\n eps_file.mdr_sfactor[f.upper()])[idx_nodes]\n\n data[\"swath_indicator\"] = (data[\"beam_number\"].flatten() > 3).astype(\n np.uint8)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n fields = [(\"longitude_full\", long_nan), (\"latitude_full\", long_nan),\n (\"sigma0_full\", long_nan), (\"inc_angle_full\", uint_nan),\n (\"azi_angle_full\", int_nan), (\"land_frac\", uint_nan),\n (\"flagfield_gen2\", byte_nan)]\n\n for f, nan_val in fields:\n data[f] = eps_file.mdr[f.upper()].flatten()\n invalid = eps_file.mdr[f.upper()].flatten() == nan_val\n\n if eps_file.mdr_sfactor[f.upper()] != 1:\n data[f] = data[f] * 1. 
/ eps_file.mdr_sfactor[f.upper()]\n\n data[f][invalid] = nan_val\n\n # modify longitudes from (0, 360) to (-180, 180)\n mask = np.logical_and(data[\"longitude_full\"] != long_nan,\n data[\"longitude_full\"] > 180)\n data[\"longitude_full\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n idx = (data[\"azi_angle_full\"] != int_nan) & (data[\"azi_angle_full\"] < 0)\n data[\"azi_angle_full\"][idx] += 360\n\n # set flags\n data[\"f_usable\"] = set_flags(data)\n\n return data, metadata", "def readMaf( options, data ):\n regex = 's\\s+([\\w\\d\\-]+?)\\.([\\w\\d\\.\\+\\-]+?)\\s+(\\d+)\\s+(\\d+)\\s+([-+])\\s+(\\d+)\\s+([\\-actgurykmswbdhvnACTGURYKMSWBDHVN]+)'\n pat = re.compile( regex )\n mf = open( options.maf )\n mafLineList = []\n order = -1\n hplList = []\n hpl = ''\n five = ''\n three = ''\n for line in mf:\n if line.startswith('#HPL'):\n d = line.split(' ')\n # example line: \"#HPL=12049 5=1 3=1 SPL=123412 S5=0 S3=12\"\n # there will be one hpl line per options.other line\n # in blocks that contain the options.ref\n hpl = int( d[0][5:] ) # comment at start of this field\n hFive = int( d[1][2] )\n hThree = int( d[2][2] )\n spl = int( d[3][4:] ) # no comment at start of this field\n hplList.append( { 'hpl': hpl, 'hFive': hFive, \n 'hThree': hThree, 'spl': spl } )\n continue\n if line.startswith('s'):\n line = line.strip()\n ml, order = extractMafLine( line, order, pat, options, data )\n if ml is None:\n sys.stderr.write( 'regexp fail on file %s line: \\'%s\\'\\n'\n 'Regex: \\'%s\\'\\n' % ( options.maf, line, regex ) )\n sys.exit( 1 )\n if ml == 'notOurGenome':\n continue\n if ml.length != len( ml.sequence ):\n sys.stderr.write( 'Error while working on file %s :\\n '\n 'printed sequence length (%d) not equal to actual sequence '\n 'length (%d) ref genome:%s other genome:%s line below:\\n%s\\n' % \n ( options.maf, ml.length, len( ml.sequence ), options.ref, options.other, line ) )\n sys.exit( 1 )\n mafLineList.append( ml )\n else:\n # end of the block\n if len( mafLineList ) > 0:\n extractBlockPairs( mafLineList, hplList, options, data )\n mafLineList = []\n order = -1\n hplList = []\n hpl = ''\n five = ''\n three = ''\n if len( mafLineList ) > 0:\n extractBlockPairs( mafLineList, hplList, options, data )", "def get_efermi(fn):\n try:\n f = open(fn)\n except:\n return 0\n line = f.readline()\n f.close()\n ef = float(line.split()[6])\n print('Calculated Fermi level: {0}'.format(ef))\n return ef", "def read_szf_fmv_13(eps_file):\n data = {}\n metadata = {}\n\n n_lines = eps_file.mdr_counter\n n_node_per_line = eps_file.mdr[\"LONGITUDE_FULL\"].shape[1]\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n # extract metadata\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n metadata[\"state_vector_time\"] = datetime.strptime(\n eps_file.mphr[\"STATE_VECTOR_TIME\"][:-4], \"%Y%m%d%H%M%S\")\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n for f in fields:\n metadata[f] = np.int16(eps_file.mphr[f.upper()])\n\n # extract time\n dt = np.datetime64(\n \"2000-01-01\") + eps_file.mdr[\"UTC_LOCALISATION\"][\"day\"].astype(\n \"timedelta64[D]\"\n ) + eps_file.mdr[\"UTC_LOCALISATION\"][\"time\"].astype(\"timedelta64[ms]\")\n data[\"time\"] = dt[idx_nodes]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\", \"flagfield_rf1\", \"flagfield_rf2\", 
\"flagfield_pl\",\n \"flagfield_gen1\"\n ]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\"\n ]\n\n # 101 min = 6082 seconds\n # state_vector_time = ascending node crossing time - 1520.5,\n # time crossing at -90 lat\n orbit_start_time = metadata[\"state_vector_time\"] - timedelta(\n seconds=1520.5)\n orbit_end_time = orbit_start_time + timedelta(seconds=6082)\n\n data[\"orbit_nr\"] = np.ma.zeros(\n data[\"time\"].size, dtype=np.int32,\n fill_value=int32_nan) + metadata[\"orbit_start\"]\n data[\"orbit_nr\"][data[\"time\"] > orbit_end_time] += 1\n\n metadata[\"orbits\"] = {}\n for orbit_nr in np.unique(data[\"orbit_nr\"]):\n if orbit_nr == metadata[\"orbit_start\"]:\n metadata[\"orbits\"][orbit_nr] = (orbit_start_time, orbit_end_time)\n else:\n metadata[\"orbits\"][orbit_nr] = (orbit_end_time, orbit_end_time +\n timedelta(seconds=6082))\n\n # extract data\n for f in fields:\n if eps_file.mdr_sfactor[f.upper()] == 1:\n data[f] = eps_file.mdr[f.upper()].flatten()[idx_nodes]\n else:\n data[f] = (eps_file.mdr[f.upper()].flatten() * 1. /\n eps_file.mdr_sfactor[f.upper()])[idx_nodes]\n\n data[\"swath_indicator\"] = (data[\"beam_number\"].flatten() > 3).astype(\n np.uint8)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n fields = [(\"longitude_full\", long_nan), (\"latitude_full\", long_nan),\n (\"sigma0_full\", long_nan), (\"inc_angle_full\", uint_nan),\n (\"azi_angle_full\", int_nan), (\"flagfield\", int_nan)]\n\n for f, nan_val in fields:\n data[f] = eps_file.mdr[f.upper()].flatten()\n invalid = eps_file.mdr[f.upper()].flatten() == nan_val\n\n if eps_file.mdr_sfactor[f.upper()] != 1:\n data[f] = data[f] * 1. / eps_file.mdr_sfactor[f.upper()]\n\n data[f][invalid] = nan_val\n\n # modify longitudes from (0, 360) to (-180, 180)\n mask = np.logical_and(data[\"longitude_full\"] != long_nan,\n data[\"longitude_full\"] > 180)\n data[\"longitude_full\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n idx = (data[\"azi_angle_full\"] != int_nan) & (data[\"azi_angle_full\"] < 0)\n data[\"azi_angle_full\"][idx] += 360\n\n # set flags\n data[\"f_usable\"] = set_flags_fmv13(data[\"flagfield\"])\n\n return data, metadata", "def read_smx_fmv_12(eps_file):\n raw_data = eps_file.scaled_mdr\n raw_unscaled = eps_file.mdr\n\n n_node_per_line = raw_data[\"LONGITUDE\"].shape[1]\n n_lines = raw_data[\"LONGITUDE\"].shape[0]\n n_records = eps_file.mdr_counter * n_node_per_line\n idx_nodes = np.arange(eps_file.mdr_counter).repeat(n_node_per_line)\n\n data = {}\n metadata = {}\n\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n\n ascat_time = shortcdstime2jd(raw_data[\"UTC_LINE_NODES\"].flatten()[\"day\"],\n raw_data[\"UTC_LINE_NODES\"].flatten()[\"time\"])\n data[\"jd\"] = ascat_time[idx_nodes]\n\n fields = [(\"sigma0_trip\", long_nan), (\"inc_angle_trip\", uint_nan),\n (\"azi_angle_trip\", int_nan), (\"kp\", uint_nan),\n (\"f_land\", uint_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].reshape(n_records, 3)\n valid = raw_unscaled[f.upper()].reshape(n_records, 3) != nan_val\n data[f][~valid] = nan_val\n\n fields = [\"sat_track_azi\", \"abs_line_number\"]\n for f in fields:\n data[f] = raw_data[f.upper()].flatten()[idx_nodes]\n\n fields = [(\"longitude\", long_nan, long_nan),\n (\"latitude\", long_nan, long_nan),\n (\"swath_indicator\", byte_nan, byte_nan),\n (\"soil_moisture\", uint_nan, uint_nan),\n 
(\"soil_moisture_error\", uint_nan, uint_nan),\n (\"sigma40\", long_nan, long_nan),\n (\"sigma40_error\", long_nan, long_nan),\n (\"slope40\", long_nan, long_nan),\n (\"slope40_error\", long_nan, long_nan),\n (\"dry_backscatter\", long_nan, long_nan),\n (\"wet_backscatter\", long_nan, long_nan),\n (\"mean_surf_soil_moisture\", uint_nan, uint_nan),\n (\"soil_moisture_sensetivity\", ulong_nan, float32_nan),\n (\"correction_flags\", uint8_nan, uint8_nan),\n (\"processing_flags\", uint8_nan, uint8_nan),\n (\"aggregated_quality_flag\", uint8_nan, uint8_nan),\n (\"snow_cover_probability\", uint8_nan, uint8_nan),\n (\"frozen_soil_probability\", uint8_nan, uint8_nan),\n (\"innudation_or_wetland\", uint8_nan, uint8_nan),\n (\"topographical_complexity\", uint8_nan, uint8_nan)]\n\n for f, nan_val, new_nan_val in fields:\n data[f] = raw_data[f.upper()].flatten()\n valid = raw_unscaled[f.upper()].flatten() != nan_val\n data[f][~valid] = new_nan_val\n\n # sat_track_azi (uint)\n data[\"as_des_pass\"] = \\\n np.array(raw_data[\"SAT_TRACK_AZI\"].flatten()[idx_nodes] < 270)\n\n # modify longitudes from [0,360] to [-180,180]\n mask = np.logical_and(data[\"longitude\"] != long_nan,\n data[\"longitude\"] > 180)\n data[\"longitude\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n mask = (data[\"azi_angle_trip\"] != int_nan) & (data[\"azi_angle_trip\"] < 0)\n data[\"azi_angle_trip\"][mask] += 360\n\n fields = [\"param_db_version\", \"warp_nrt_version\"]\n for f in fields:\n data[f] = raw_data[\"PARAM_DB_VERSION\"].flatten()[idx_nodes]\n\n metadata[\"spacecraft_id\"] = int(eps_file.mphr[\"SPACECRAFT_ID\"][2])\n\n data[\"node_num\"] = np.tile((np.arange(n_node_per_line) + 1), n_lines)\n\n data[\"line_num\"] = idx_nodes\n\n return data, metadata", "def test_read_0_1_smirff(self):\n ForceField(\n get_data_file_path(\n \"test_forcefields/smirff99Frosst_reference_0_1_spec.offxml\"\n )\n )", "def read(self,isOutputFile = False, headerCols = None, verbose = 0):\n \n #\n # TODO TODO also need a 'readFinal' one to read the FINAL information!!\n # set a flag in MonteFormat.py to select which cs info to read...\n\n if verbose == 1:\n print \"Reading %s chemical shift list %s\" % (self.format,self.name)\n\n fin = open(self.name, 'rU')\n\n line = fin.readline()\n \n spinSystemId = 0\n resLabel = oldResLabel = None\n\n while line:\n\n if self.patt['%sComment' % self.format].search(line):\n\n if not isOutputFile and not self.chemShifts and not headerCols:\n\n #\n # Get atom info from first line...\n #\n \n headerCols = line.split()\n headerCols.pop(0)\n\n line = fin.readline()\n continue\n\n if self.patt['emptyline'].search(line):\n line = fin.readline()\n continue\n \n #\n # Make sure header info is available - otherwise no point\n #\n \n if not headerCols:\n raise \"Error: no header column information available. Try reading .par file!\"\n return\n \n #\n # Get the info... 
should really come for .par file!!\n #\n \n cols = line.split()\n \n infoCode = None\n \n if not isOutputFile:\n \n stripId = returnFloat(cols.pop(0))\n\n #\n # NOt necessarily info string available...\n #\n\n if self.patt['onlyFloat'].search(cols[0]):\n seqCode = None\n resLabel = None\n\n else:\n assignment = cols.pop(0)\n\n searchAssignment = self.patt['%sAssignment' % self.format].search(assignment)\n\n resLabel = searchAssignment.group(1)\n seqCode = searchAssignment.group(2)\n \n else:\n \n seqCode = cols.pop(0)\n if seqCode[-1] in '+':\n seqCode = seqCode[:-1]\n infoCode = seqCode[-1]\n \n oldResLabel = resLabel\n resLabel = cols.pop(0)\n stripId = returnFloat(cols.pop(0))\n voidCol = cols.pop(0)\n \n #\n # Set up info for atoms...\n #\n \n if not seqCode or seqCode == '?':\n seqCode = None\n spinSystemId = spinSystemId + 2\n else:\n seqCode = returnInt(seqCode)\n\n if len(cols) == 1:\n cols = cols.split(',')\n\n values = returnFloats(cols)\n\n for i in range(0,len(values)):\n atomId = headerCols[i]\n value = values[i]\n \n if value == 0.0:\n continue\n \n atomSearch = self.patt['%sAtomInfo' % self.format].search(atomId)\n \n atomName = atomSearch.group(1)\n atomPlace = atomSearch.group(2)\n \n if atomName == 'HA1':\n nextAtomValue = values[i+1]\n if nextAtomValue == 0.00:\n atomName = 'HA'\n \n curSeqCode = seqCode\n curResLabel = None\n \n if seqCode == None:\n curSpinSystemId = spinSystemId\n prevSpinSystemId = spinSystemId - 1\n else:\n curSpinSystemId = None\n prevSpinSystemId = None\n \n if atomPlace == '(i-1)' or atomPlace == '-1':\n\n if seqCode != None:\n curSeqCode = seqCode - 1\n else:\n curSpinSystemId = spinSystemId - 1\n prevSpinSystemId = None\n \n if not isOutputFile:\n curResLabel = resLabel\n else:\n curResLabel = oldResLabel\n \n elif isOutputFile:\n curResLabel = resLabel\n\n self.chemShifts.append(MonteChemShift(value,atomName,curSeqCode,curSpinSystemId,stripId,curResLabel,self.defaultMolCode, infoCode = infoCode, prevSpinSystemId = prevSpinSystemId))\n\n line = fin.readline()\n\n fin.close()", "def readFT(self,file=\"out__1.ft\"):", "def spot1d_rsa(infile, sequence):\n data = np.loadtxt(infile, usecols=4, skiprows=1).reshape((1, -1, 1))\n for i in range(len(sequence)):\n data[0, i, 0] /= max_solvent_acc[sequence[i].upper()]\n\n return data", "def read(self) :\n # Open the file.\n f = open(self.output, 'r')\n lines = f.readlines()\n \n # Find the eigenvalue.\n count = 0\n while True :\n words = lines[count].split()\n if len(words) == 5 :\n if words[0] == \"*\" and words[1] == \"K-EFF\":\n self.keff = float(words[3])\n break\n count += 1\n \n # Find the peaking.\n a = 0 # Assembly index\n \n while True :\n words = lines[count].split()\n if len(words) == 8 :\n if words[0] == \"NODE\" and words[1] == \"AVERAGE\" and words[2] == \"POWERS\" :\n count += 5 # Powers start 5 lines below title\n for row in range(0, self.dimension) :\n words = lines[count].split()\n assert(len(words) >= self.dimension)\n for col in range(0, self.dimension) :\n self.peaking_map[row, col] = float(words[col+1])\n if self.core.stencil[row, col] > 0:\n #print \" a=\", a, \" row=\", row, \" col=\", col, len(self.peaking)\n self.peaking[a] = self.peaking_map[row, col]\n a += 1\n count += 1\n break\n count += 1 \n # Maximum peaking.\n self.maxpeak = np.max(self.peaking)", "def extract_maf(n):\n data = (line.split() for line in sys.stdin)\n next(data)\n for row in data:\n if row[4] == 'SNP' and float(row[-1]) > 0.01:\n row[1] = int(row[1])\n row[-1] = int(float(row[-1]) * 33)\n 
print(get_pouyak_name(chromosome(n), *row[:4]), row[-1])", "def profbval_strict(infile, sequence):\n result = np.zeros((1, len(sequence), 1))\n with open(infile, \"r\") as fh:\n it = 0\n for line in fh:\n if not line.startswith(\"number\"):\n pred_str = line.strip().split()[5]\n if pred_str == \"F\":\n result[0, it, 0] = 1\n it += 1\n\n return result", "def test_fake_file_xmm(self):\n fits_file = os.path.join(self.datadir, 'monol_test_fake_lc_xmm.evt')\n hen.fake.main(['--deadtime', '1e-4', '-m', 'XMM', '-i', 'epn',\n '--ctrate', '2000',\n '-o', fits_file])\n hdu_list = fits.open(fits_file)\n hdunames = [hdu.name for hdu in hdu_list]\n assert 'STDGTI01' in hdunames\n assert 'STDGTI02' in hdunames\n assert 'STDGTI07' in hdunames", "def read_inversion_info(file_dic):\n #print_file_test = open('file_test.txt','w')\n\n if not ( check_inversion_files(file_dic) ):\n print 'error(read_inversion_info): problem with lenstool file names'\n return 0\n \n file_generate_arcs = file_dic['file_generate_arcs']\n info_input_lens = fc.extract_second_identifiers( file_generate_arcs, \\\n 'potential' )\n#-------------------------------------------------------------------------------\n\n file_source = file_dic['file_source']\n info_src = np.loadtxt(file_source, unpack=False)\n if len(info_src) == 8 and np.isscalar(info_src[0]):\n #FIXME - check if the second condition is all we need\n info_src = [info_src]\n#-------------------------------------------------------------------------------\n\n file_make_inversion = file_dic['file_make_inversion']\n info_fited_param = fc.extract_second_identifiers( file_make_inversion, \\\n 'limit' )\n info_forme = fc.extract_parameter(file_make_inversion, 'forme')[0][0]\n\n#-------------------------------------------------------------------------------\n\n file_best_fit = file_dic['file_best_fit']\n info_best_fit = fc.extract_second_identifiers( file_best_fit, \\\n 'potentiel' )\n\n info_xi2 = fc.extract_parameter(file_best_fit, '#Chi2pos:')\n\n#-------------------------------------------------------------------------------\n file_chires = file_dic['file_chires']\n\n info_chires = extract_parameter(file_chires, '0')\n rmss_mean = [0.0, 0.0]\n rmsi_mean = [0.0, 0.0]\n for i in info_chires:\n if i[0] != 'A':\n rmss_mean[0] = rmss_mean[0] + float(i[7])\n rmss_mean[1] = rmss_mean[1] + 1.0\n \n rmsi_mean[0] = rmsi_mean[0] + float(i[8])\n rmsi_mean[1] = rmsi_mean[1] + 1.0\n\n rmss_mean = rmss_mean[0]/rmss_mean[1]\n rmsi_mean = rmsi_mean[0]/rmsi_mean[1]\n#-------------------------------------------------------------------------------\n out_dict = { 'xi2' : float(info_xi2[0][0]), \\\n 'best_fit_lens' : info_best_fit, \\\n 'rmsi_mean' : rmsi_mean, \\\n 'rmss_mean' : rmss_mean, \\\n 'fited_parameters' : info_fited_param[0].keys(), \\\n 'input_lens' : info_input_lens[len(info_input_lens) - 1], \\\n 'forme' : info_forme \\\n }\n #for i in out_dict.keys():\n # print i, out_dict[i]\n return out_dict", "def read_model_shiftby_performances(lines):\n performances = {}\n patients = [str(x) for x in range(13)]\n # current_model = ''\n # shifts = [-100, -75, -50, -25, 25, 50, 100, 125, 150, 175, 200, 225, 250]\n for i, line in enumerate(lines):\n words = line.split(' ')\n if (len(words) == 10) and (words[0] == 'starting'):\n if 'vel' in words[-1][:-1]:\n variable = 'vel'\n else:\n variable = 'absVel'\n col_name = float(words[1][:-1])\n if (len(words) == 2) and (words[0] in patients):\n # shift_words = lines[i+1].replace(':', '').split(' ')\n # assert shift_words[0] == 'shift'\n # col_name = 
f'{variable}_' + '_'.join(shift_words)[:-1]\n # col_name = float(words[1][:-1])\n if col_name not in performances.keys():\n performances[col_name] = [float(words[1][:-1])]\n else:\n performances[col_name].append(float(words[1][:-1]))\n return performances", "def readDriverFile(self, input_file):\n\n\n fid = open(self.basePath + input_file,'r')\n\n # Line 1\n line = fid.readline()\n l_input = line.split('!')\n mshfile = l_input[0].rstrip()\n\n # Line 2\n line = fid.readline()\n l_input = line.split('!')\n obsfile = l_input[0].rstrip()\n\n # Line 3\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input=='null':\n topofile = []\n\n else:\n topofile = l_input[0].rstrip()\n\n\n # Line 4\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n mstart = float(l_input[1])\n\n else:\n mstart = l_input[0].rstrip()\n\n # Line 5\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n mref = float(l_input[1])\n\n else:\n mref = l_input[0].rstrip()\n\n # Line 6\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n staticInput = float(l_input[1])\n\n elif l_input[0]=='DEFAULT':\n staticInput = None\n\n else:\n staticInput = l_input[0].rstrip()\n\n\n # Line 7\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input=='DEFAULT':\n magfile = []\n\n else:\n magfile = l_input[0].rstrip()\n\n # Line 8\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input=='DEFAULT':\n wgtfile = []\n\n else:\n wgtfile = l_input[0].rstrip()\n\n # Line 9\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n chi = float(l_input[0])\n\n # Line 10\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n val = np.array(l_input[0:4])\n alphas = val.astype(np.float)\n\n # Line 11\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n val = np.array(l_input[1:3])\n bounds = val.astype(np.float)\n\n else:\n bounds = l_input[0].rstrip()\n\n # Line 12\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n val = np.array(l_input[1:6])\n lpnorms = val.astype(np.float)\n\n else:\n lpnorms = l_input[0].rstrip()\n\n # Line 13\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n val = np.array(l_input[1:3])\n eps = val.astype(np.float)\n\n else:\n eps = [None,None]\n\n self.mshfile = mshfile\n self.obsfile = obsfile\n self.topofile = topofile\n self.mstart = mstart\n self._mrefInput = mref\n self._staticInput = staticInput\n self.magfile = magfile\n self.wgtfile = wgtfile\n self.chi = chi\n self.alphas = alphas\n self.bounds = bounds\n self.lpnorms = lpnorms\n self.eps = eps", "def get_file_format(file):\n flag = None\n with open(file) as f:\n for line in f.readlines():\n MAT, MF, MT = read_control(line)[:3]\n if MF == 1 and MT == 451:\n i = 0\n C, i = read_cont([line], i)\n flag = C.N1\n break\n if flag is None:\n ftype = None\n elif flag == -11 or flag == -12:\n ftype = \"errorr\"\n elif flag == -1:\n ftype = \"gendf\"\n else:\n if C.L1 == 2:\n ftype = \"pendf\"\n else:\n ftype = \"endf6\"\n return ftype", "def read_forces(self, fname):\n outfile = open(fname)\n lines = outfile.readlines()\n outfile.close()\n nats = len(self.atoms)\n forces = np.zeros((nats, 3), float)\n infinite_force=\"*****\"\n if 'mozyme' in self.str_params['job_type'].lower():\n for i, line in enumerate(lines):\n if line.find('FINAL POINT AND DERIVATIVES') != -1:\n for j in range(nats):\n gline = lines[i + j + 
5]\n pre_force=gline[8:35]\n if(infinite_force in pre_force):\n forces[j] = [999999999.9999,999999999.9999,999999999.9999]\n else:\n forces[j] = [float( pre_force[0:9].strip()),float( pre_force[9:18].strip()),float( pre_force[18:27].strip())]\n else:\n for i, line in enumerate(lines):\n if line.find('GRADIENT\\n') != -1:\n for j in range(nats * 3):\n gline = lines[i + j + 1]\n pre_force=gline[49:62]\n if(infinite_force in pre_force):\n forces[int(j/3), int(j%3)] =999999999.9999\n else:\n forces[int(j/3), int(j%3)] = float(pre_force)\n break\n#do not change unit for mopac\n forces *= - (kcal / mol)\n return forces", "def get_training(feature_path): \n features = np.loadtxt(feature_path)\n feature_size = features.shape[1] -1 \n features_in = features[:,0:feature_size]\n features_out = features[:,-1]\n #features_out = np.array(map(lambda x: x if x else 0, features_out_unnorm))\n return features_in, features_out", "def convert_matrix(infile, names,refdict,nosamples):\n \n if infile.endswith(\".gz\"):\n inf = gzip.open(infile, \"rb\")\n \n else:\n inf = open(infile, \"r\")\n for line in inf:\n line = line.rsplit()\n if line[0] == \"chromosome\":\n pass # header\n else:\n \n\n chrom = line[0]\n start = line[1]\n stop = line[2]\n TE = line[4]\n n_te = str(len(TE.split(\",\")))\n tes=TE.split(\",\")\n tefam=[]\n tesuperfamily=[]\n \n \n for i in xrange(len(tes)):\n \n tefam.append(refdict[tes[i]][0])\n \n tesuperfamily.append(refdict[tes[i]][1])\n \n \n superfamily=list(set(tesuperfamily))\n if 'Unknown' in superfamily:\n superfamily.remove('Unknown')\n if not superfamily:\n superfamily.append('Unknown')\n \n pos = line[5].split(\",\")\n neg = line[6].split(\",\")\n#missing = 305-(len(pos)+len(neg))/305\n te_id = \"\\t\".join([chrom, start, stop])\n status = get_status(pos, neg, names)\n column_ordered = []\n for i in names:\n column_ordered.append(status[i])\n noNA = filter(lambda x: x != \"NA\", status.values()) \n noNA = map(int, noNA)\n pos_count = sum(noNA)\n l = len(noNA)\n neg_count = l - pos_count\n TE_present=pos_count\n TE_absent=neg_count\n if(pos_count < neg_count):\n Minor_allele=\"presence\"\n\n else:\n Minor_allele=\"absence\"\n#print Minor_allele\n q20=int(0.2*nosamples)\n q80=int(0.8*nosamples)\n if (TE_absent < q20):\n Absence_classification=\"True deletion\"\n elif (TE_absent > q80):\n Absence_classification=\"No insertion\"\n else:\n Absence_classification=\"NA\"\n original_call_deletion = 'T'\n MAF=float(min(TE_present, TE_absent))/nosamples\n #print int(min(TE_present, TE_absent)) ,MAF\n if(MAF < 0.025):\n Frequency_classification = \"Rare\"\n else:Frequency_classification =\"Common\"\n print(te_id + \"\\t\" + TE + \"\\t\" + \",\".join(tefam) + \"\\t\" +\",\".join(superfamily) + \"\\t\" +n_te + \"\\t\" + str(pos_count) + \"\\t\" + str(neg_count) + \"\\t\" +str(Minor_allele) + \"\\t\" +original_call_deletion + \"\\t\" +str(Absence_classification) + \"\\t\" +str(MAF) + \"\\t\" +str(Frequency_classification) + \"\\t\"+\"\\t\".join(column_ordered))\n inf.close()", "def sgd_features(filepath=None):\n\n if filepath == None:\n filepath=load_sgd_tab()\n\n arabic_to_roman_dict=chromosomename_roman_to_arabic()[0]\n \n with open(filepath) as f:\n lines = f.readlines()\n\n\n feature_list = []\n feature_orf_dict = {}\n feature_ars_dict = {}\n feature_telomere_dict = {}\n feature_ltr_dict = {}\n feature_centromere_dict = {}\n feature_Xelement_dict = {}\n feature_intron_dict = {}\n feature_ncrna_dict = {}\n feature_ncexon_dict = {}\n feature_trna_dict = {}\n feature_snorna_dict = {}\n 
feature_teg_dict = {}\n feature_5p_utrintron_dict = {}\n feature_mas_dict = {}\n feature_snrna_dict = {}\n feature_rrna_dict = {}\n feature_ets_dict = {}\n feature_its_dict = {}\n feature_oor_dict = {}\n feature_telrna_dict = {}\n \n for line in lines:\n l = line.strip('\\n').split('\\t')\n if not l[1] in feature_list:\n feature_list.append(l[1])\n\n if not l[8].endswith('micron') and not l[8] == '':\n chromosome = arabic_to_roman_dict.get(int(l[8]))\n if l[1] == 'ORF':\n feature_orf_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'ARS':\n feature_ars_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'telomere':\n feature_telomere_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'long_terminal_repeat':\n feature_ltr_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'centromere':\n feature_centromere_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'X_element':\n feature_Xelement_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'intron':\n feature_intron_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'ncRNA_gene':\n feature_ncrna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'noncoding_exon':\n feature_ncexon_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'tRNA_gene':\n feature_trna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'snoRNA_gene':\n feature_snorna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'transposable_element_gene':\n feature_teg_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'five_prime_UTR_intron':\n feature_5p_utrintron_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'matrix_attachment_site':\n feature_mas_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'snRNA_gene':\n feature_snrna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'rRNA_gene':\n feature_rrna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'external_transcribed_spacer_region':\n feature_ets_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'internal_transcribed_spacer_region':\n feature_its_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'origin_of_replication':\n feature_oor_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'telomerase_RNA_gene':\n feature_telrna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n\n\n \n\n\n genomicregions_list = ['ORF', 'ARS', 'Telomere', 'long_terminal_repeat',\n 'Centromere', 'X_element', 'Intron', 'ncRNA_gene',\n 'Noncoding_exon', 'tRNA_gene', 'snoRNA_gene',\n 'transposable_element_gene', 'five_prime_UTR_intron',\n 'matrix_attachment_site', 'snRNA_gene', 'rRNA_gene',\n 'external_transcribed_spacer_region',\n 'internal_transcribed_spacer_region',\n 'origin_of_replication', 'telomerase_RNA_gene']\n\n\n return(genomicregions_list, feature_orf_dict, feature_ars_dict, feature_telomere_dict,\n feature_ltr_dict, feature_centromere_dict, feature_Xelement_dict, feature_intron_dict,\n feature_ncrna_dict, feature_ncexon_dict, feature_trna_dict,\n feature_snorna_dict, feature_teg_dict, 
feature_5p_utrintron_dict,\n feature_mas_dict, feature_snrna_dict, feature_rrna_dict,\n feature_ets_dict, feature_its_dict, feature_oor_dict,\n feature_telrna_dict)", "def read_file_agsm(self,filename):\n\n narr,larr,farr,iarr,nn,exceed_freqlim = \\\n aims_fortran.read_file_agsm(filename,config.npositive,config.agsm_cutoff, \\\n config.cutoff*self.cutoff)\n self.modes = np.array(zip(narr[0:nn],larr[0:nn],farr[0:nn],iarr[0:nn]),dtype=modetype)\n\n return exceed_freqlim", "def post_process(self, filename):\n title = self.title\n\n outfile = open(filename, 'r')\n data = outfile.readlines()\n\n name = data[0].strip()\n mode = data[1].strip()\n ops = data[2].strip().split(',')\n nl = 'True' in ops[0]\n ln = 'True' in ops[1]\n drv = 'True' in ops[2]\n\n data = data[3:]\n npt = len(data)\n\n t1u = np.empty((npt, ))\n t3u = np.empty((npt, ))\n t5u = np.empty((npt, ))\n flag = np.empty((npt, ), dtype=np.bool)\n x_dv = np.empty((npt, ))\n x_state = np.empty((npt, ))\n x_proc = np.empty((npt, ))\n\n for j, line in enumerate(data):\n x_dv[j], x_state[j], x_proc[j], flag[j], t1u[j], t3u[j], t5u[j] = line.strip().split(',')\n\n if np.any(flag):\n use_flag = True\n else:\n use_flag = False\n\n # Times are all normalized.\n t1 = t1u/t1u[0]\n t3 = t3u/t3u[0]\n t5 = t5u/t5u[0]\n\n if mode == 'state':\n x = x_state\n xlab = \"Number of states.\"\n elif mode == 'desvar':\n xlab = \"Number of design vars.\"\n x = x_dv\n elif mode == 'proc':\n x = x_proc\n xlab = \"Number of processors.\"\n\n if use_flag:\n\n flagtxt = self.flagtxt\n\n # Split them up. We know the pattern.\n t1F = t1[0::2]\n t1T = t1[1::2]\n t3F = t3[0::2]\n t3T = t3[1::2]\n t5F = t5[0::2]\n t5T = t5[1::2]\n\n xT = x[0::2]\n xF = x[1::2]\n\n # Generate plots\n\n if nl:\n plt.figure(1)\n plt.loglog(xF, t1F, 'bo-')\n plt.loglog(xT, t1T, 'ro-')\n\n plt.xlabel(xlab)\n plt.ylabel('Nonlinear Solve: Normalized Time')\n plt.title(title)\n plt.grid(True)\n if self.equal_axis:\n plt.axis('equal')\n plt.legend(['Default', flagtxt], loc=0)\n plt.savefig(\"%s_%s_%s.png\" % (name, mode, 'nl'))\n\n if ln:\n plt.figure(2)\n plt.loglog(xF, t3F, 'o-')\n plt.loglog(xT, t3T, 'ro-')\n\n plt.xlabel(xlab)\n plt.ylabel('Compute Totals: Normalized Time')\n plt.title(title)\n plt.grid(True)\n if self.equal_axis:\n plt.axis('equal')\n plt.legend(['Default', flagtxt], loc=0)\n plt.savefig(\"%s_%s_%s.png\" % (name, mode, 'ln'))\n\n if drv:\n plt.figure(3)\n plt.loglog(xF, t5F, 'o-')\n plt.loglog(xT, t5T, 'ro-')\n\n plt.xlabel(xlab)\n plt.ylabel(self.title_driver + ': Normalized Time')\n plt.title(title)\n plt.grid(True)\n if self.equal_axis:\n plt.axis('equal')\n plt.legend(['Default', flagtxt], loc=0)\n plt.savefig(\"%s_%s_%s.png\" % (name, mode, 'drv'))\n\n if self.special_plot_driver_on_linear:\n\n # Plot whatever driver does (e.g., coloring) on the same axis and normalization as linear time.\n t5 = t5u/t3u[0]\n t5F = t5[0::2]\n t5T = t5[1::2]\n\n plt.figure(4)\n plt.loglog(xF, t3F, 'o-')\n plt.loglog(xT, t3T, 'ro-')\n plt.loglog(xT, t5T, 'mo-')\n\n plt.xlabel(xlab)\n plt.ylabel('Normalized Time')\n plt.title(title)\n plt.grid(True)\n if self.equal_axis:\n plt.axis('equal')\n plt.legend(['Compute Totals', 'Compute Totals: ' + flagtxt, self.title_driver], loc=0)\n plt.savefig(\"%s_%s_%s.png\" % (name, mode, 'spec1'))\n\n else:\n\n # Generate plots\n\n if nl:\n plt.figure(1)\n plt.loglog(x, t1, 'o-')\n\n plt.xlabel(xlab)\n plt.ylabel('Nonlinear Solve: Normalized Time')\n plt.title(title)\n plt.grid(True)\n if self.equal_axis:\n plt.axis('equal')\n 
plt.savefig(\"%s_%s_%s.png\" % (name, mode, 'nl'))\n\n if ln:\n plt.figure(2)\n plt.loglog(x, t3, 'o-')\n\n plt.xlabel(xlab)\n plt.ylabel('Compute Totals: Normalized Time')\n plt.title(title)\n plt.grid(True)\n if self.equal_axis:\n plt.axis('equal')\n plt.savefig(\"%s_%s_%s.png\" % (name, mode, 'ln'))\n\n # For procs, we also view the time/proc as a function of number of procs.\n if mode == 'proc':\n plt.figure(3)\n plt.loglog(x, t3/x, 'o-')\n\n plt.xlabel(xlab)\n plt.ylabel('Compute Totals: Normalized Time per Processor')\n plt.title(title)\n plt.grid(True)\n if self.equal_axis:\n plt.axis('equal')\n plt.savefig(\"%s_%s_%s_per_proc.png\" % (name, mode, 'ln'))\n\n plt.show()\n print('done')", "def main():\n dir_path='.'\n meas_file='magic_measurements.txt'\n samp_file=\"er_samples.txt\"\n out_file='magic_measurements.txt'\n if '-h' in sys.argv:\n print(main.__doc__)\n sys.exit()\n if '-WD' in sys.argv:\n ind = sys.argv.index('-WD')\n dir_path=sys.argv[ind+1]\n if '-f' in sys.argv:\n ind = sys.argv.index('-f')\n meas_file=sys.argv[ind+1]\n if '-fsa' in sys.argv:\n ind = sys.argv.index('-fsa')\n samp_file=sys.argv[ind+1]\n if '-F' in sys.argv:\n ind = sys.argv.index('-F')\n out_file=sys.argv[ind+1]\n # read in measurements file\n meas_file=dir_path+'/'+meas_file\n out_file=dir_path+'/'+out_file\n samp_file=dir_path+'/'+samp_file\n data,file_type=pmag.magic_read(meas_file)\n samps,file_type=pmag.magic_read(samp_file)\n MeasRecs=[]\n sampnames,sflag=[],0\n for rec in data:\n for samp in samps:\n if samp['er_sample_name'].lower()==rec['er_sample_name'].lower():\n if samp['er_sample_name'] not in sampnames:sampnames.append(samp['er_sample_name'].lower())\n rec['er_site_name']=samp['er_site_name']\n rec['er_location_name']=samp['er_location_name']\n MeasRecs.append(rec)\n break\n if rec['er_sample_name'].lower() not in sampnames:\n sampnames.append(rec['er_sample_name'].lower())\n sflag=1\n SampRec={}\n for key in list(samps[0].keys()):SampRec[key]=\"\"\n SampRec['er_sample_name']=rec['er_sample_name']\n SampRec['er_citation_names']=\"This study\"\n SampRec['er_site_name']='MISSING'\n SampRec['er_location_name']='MISSING'\n SampRec['sample_desription']='recorded added by update_measurements - edit as needed'\n samps.append(SampRec)\n print(rec['er_sample_name'],' missing from er_samples.txt file - edit orient.txt file and re-import')\n rec['er_site_name']='MISSING'\n rec['er_location_name']='MISSING'\n MeasRecs.append(rec)\n pmag.magic_write(out_file,MeasRecs,'magic_measurements')\n print(\"updated measurements file stored in \", out_file)\n if sflag==1:\n pmag.magic_write(samp_file,samps,'er_samples')\n print(\"updated sample file stored in \", samp_file)", "def loader(filename,wdm=0,verbose=0,kmpers=1):\n with open(filename, 'rb') as f:\n if wdm == False:\n if verbose>1:\n print(filename)\n #file info\n info= np.fromfile(f,dtype=infodtype,count=1)\n infoBytes = f.tell()\n if verbose>2:\n print(infoBytes)\n #skip darkmatter\n #read the first dm line\n if verbose>2:\n print(f.tell())\n catd = np.fromfile(f,dtype= dmdtype, count=1) \n #get the bytes location and subtract off the bytes location after loading info to get n bytes a line for dm\n if verbose>2:\n print(f.tell())\n current = f.tell()\n dmBytes = current-infoBytes\n f.seek(dmBytes*(info['nd'][0]-1)+current)\n if verbose>2:\n print(f.tell())\n # stars setup \n cats= np.fromfile(f,dtype=stellardtype, count=info['ns'][0])\n if verbose>2:\n print('done')\n else:\n if verbose>1:\n print(filename)\n #file info\n info= 
np.fromfile(f,dtype=infodtype,count=1)\n if verbose>2:\n print(f.tell())\n # #dark matter setup count is reading the number of ?rows? \n catd= np.fromfile(f,dmdtype, count=info['nd'][0]) \n if verbose>2:\n print(f.tell()) \n # stars setup \n cats= np.fromfile(f,dtype=stellardtype, count=info['ns'][0])\n if verbose>2:\n print('done')\n \n \n #convert to physical units as found in README.md\n if wdm == True:\n catd['mass']*=2.324876e9\n if kmpers == 1:\n catd['vx']*=100.\n catd['vy']*=100.\n catd['vz']*=100.\n cats['mass']*=2.324876e9\n if kmpers == 1:\n cats['vx']*=100.\n cats['vy']*=100.\n cats['vz']*=100.\n \n if wdm == True:\n return(catd,cats,info)\n else:\n return(cats,info)", "def predyflexy(infile, sequence):\n result = np.loadtxt(infile, usecols=10, skiprows=1).reshape((1, -1, 1))\n result[:, :10, 0] = 0\n result[:, -10:, 0] = 0\n return result", "def cam_read(filename):\n f = open(filename,'rb')\n check = np.fromfile(f,dtype=np.float32,count=1)[0]\n assert check == TAG_FLOAT, ' cam_read:: Wrong tag in flow file (should be: {0}, is: {1}). Big-endian machine? '.format(TAG_FLOAT,check)\n M = np.fromfile(f,dtype='float64',count=9).reshape((3,3))\n N = np.fromfile(f,dtype='float64',count=12).reshape((3,4))\n return M,N", "def read_input_pizza(filename):\n lines = open(filename).readlines()\n M, N = [int(val) for val in lines[0].split()]\n available = np.array([int(n) for n in lines[1].split()])\n return M, N, available" ]
[ "0.61413145", "0.6100522", "0.56102365", "0.55467093", "0.55414826", "0.5517877", "0.55111635", "0.5491323", "0.5489486", "0.54673564", "0.5458751", "0.54487556", "0.5440117", "0.5364303", "0.53435946", "0.53398484", "0.5336551", "0.5330061", "0.5308056", "0.5289896", "0.526527", "0.5262551", "0.5247705", "0.52463704", "0.524317", "0.5235943", "0.52209574", "0.5219252", "0.52103275", "0.520664" ]
0.6439688
0
Factory method to create a cache object from github/spilchen/baseball_id_db This is called as part of package initialization and so can be referred to via the Lookup variable. >>> from baseball_id import Lookup >>> Lookup.from_yahoo_ids([10794, 9542, 7578])
def create(cls): ssl._create_default_https_context = ssl._create_unverified_context c = lookup.Cache('https://raw.githubusercontent.com/spilchen/baseball_id_db/main/master.csv') return c
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_fake(cls):\n source = pkg_resources.open_text('baseball_id', 'sample.master.csv',\n encoding='iso-8859-1')\n c = lookup.Cache(source)\n return c", "def construct(cls, obs_lists, platform_id):\n step = 0\n LookupTable = []\n while step < obs_lists.shape[0]:\n K = str(int(obs_lists[step, 0]))\n LookupTable.append(BaseCreateFactory(K, platform_id).create_object())\n step += 1\n return LookupTable", "def load_by_ids(cls,ids):\n if not ids or ids[0] == '':\n return None\n es = from_caches(ids) #(ids,'SuiBook') as prefixed\n notfounds = filter(lambda e:e not in es, ids)\n if len(notfounds)>0:\n es2 = dict((str(e.key().id()),e) for e in SuiBook.get_by_id(map(lambda e:int(e),notfounds)) if e)\n to_caches(es2) #to_caches(dict(),time,key_prefix='SuiBook')\n es.update(es2)\n return es", "def __init__(self, simplecache=None, kodidb=None):\n\n if not kodidb:\n from kodidb import KodiDb\n self.kodidb = KodiDb()\n else:\n self.kodidb = kodidb\n\n if not simplecache:\n from simplecache import SimpleCache\n self.cache = SimpleCache()\n else:\n self.cache = simplecache", "def init():\n database = \"database.pkl\"\n\n onsite_bills = BillID(database)\n online_bills = BillID(database)\n\n return onsite_bills, online_bills", "def load_by_ids(cls,ids):\n es = from_caches(ids) #some are loaded from memcache, others are ignored.\n notfounds = filter(lambda e:e not in es, ids)\n if len(notfounds)>0:\n es2 = dict((str(e.key().id()),e) for e in SuiGoods.get_by_id(map(lambda e:int(e),notfounds)))\n to_caches(es2)\n es.update(es2)\n return es", "def seek_by_id(cls,id):\n bk = from_caches('%s'%id)\n if not bk:\n bk = SuiBook.get_by_id(int(id))\n if bk:\n to_cache('%s'%id, bk)\n return bk", "def __init__(self, *args, **kw):\n # kw['strIdent'] = DBCAT\n BaseDB.__init__(self, *args, **kw)\n # cache by project name as key and project Id as value\n self._gbl_projectid_cache = {}", "def symbol_factory(self, id, bp=0):\n try:\n s = self.sym[id] # if already in table don't do anything. Memoized\n except KeyError as e:\n\n class s(BaseSymbol): \n # create appropriate symbol class at run time\n pass\n\n s.__name__ = \"sym(\" + id + \")\"\n s.id = id\n s.lbp = bp\n s.parent = self\n self.sym[id] = s\n else:\n s.lbp = max(bp, s.lbp)\n return s # NOTE: This function does not returns an object. 
It returns the class", "def _cache(item_label, item_list):\n id_label = item_label + '_id'\n mbid_label = item_label + '_mbid'\n echonest_id_label = item_label + '_echonest_id'\n items = {}\n for item in item_list:\n key = '/%s/%s' % (item_label, item[id_label])\n items[key] = item\n musicbrainz_id = item.get(mbid_label, None)\n if musicbrainz_id:\n items['/musicbrainz/%s/%s' % (item_label, musicbrainz_id)] = key\n # echonest_id = item.get(echonest_id_label, None)\n # if echonest_id:\n # items['/echonest/%s/%s' % (item_label, echonest_id)] = key\n application.config.get('CACHE').set_many(items)", "def create_db_from_cache():\n with open('matches.cache', 'rb') as f:\n matches = pickle.load(f)\n\n Base.metadata.create_all(engine)\n match_loader(matches)", "def __new__(cls, *args, **kwargs):\n if cls.__instance is None:\n cls.__instance = super(CacheManagerSingleton, cls).__new__(cls)\n # Generate all ground truth data files from hard-coded data\n CacheManagerSingleton.export_all_ground_truth_data()\n return cls.__instance", "def _from_db_object_list(db_objects, cls, context):\n return [Boar._from_db_object(cls(context), obj)\n for obj in db_objects]", "def lookup(cls, _db, short_name):\n def _lookup():\n library = get_one(_db, Library, short_name=short_name)\n return library, False\n library, is_new = cls.by_cache_key(_db, short_name, _lookup)\n return library", "def make_library_cache(prefix):\n # avoid cache prefix reuse\n assert prefix not in _lib_cache_prefixes\n _lib_cache_prefixes.add(prefix)\n\n class CustomCodeLibraryCacheImpl(CodeLibraryCacheImpl):\n _filename_prefix = prefix\n\n class LibraryCache(Cache):\n \"\"\"\n Implements Cache that saves and loads CodeLibrary objects for additional\n feature for the specified python function.\n \"\"\"\n _impl_class = CustomCodeLibraryCacheImpl\n\n return LibraryCache", "def __init__(self):\n if Config.USEMEMCACHED is True:\n self.mc = MCache(server = Config.MEMCACHED_SERVER,\n username = Config.MEMCACHED_USERNAME,\n password = Config.MEMCACHED_PASSWORD)\n else:\n self.mc = None\n self.api = DozensApi()", "def make_crypto_db():\n threading.Timer(3600, make_crypto_db).start()\n req = requests.get(url = 'https://api.coinmarketcap.com/v1/ticker/?limit=0')\n all_coins = req.json()\n for coins in all_coins:\n name_id_map[coins['name'].lower()] = coins['id']\n symbol_id_map[coins['symbol'].lower()] = coins['id']", "def load_srumid_lookups(database):\n id_lookup = {}\n #Note columns 0 = Type, 1 = Index, 2 = Value\n lookup_table = database.get_table_by_name('SruDbIdMapTable')\n column_lookup = dict([(x.name,index) for index,x in enumerate(lookup_table.columns)]) \n for rec_entry_num in range(lookup_table.number_of_records):\n bin_blob = smart_retrieve(lookup_table,rec_entry_num, column_lookup['IdBlob'])\n if smart_retrieve(lookup_table,rec_entry_num, column_lookup['IdType'])==3:\n bin_blob = BinarySIDtoStringSID(bin_blob)\n elif not bin_blob == \"Empty\":\n bin_blob = blob_to_string(bin_blob)\n id_lookup[smart_retrieve(lookup_table,rec_entry_num, column_lookup['IdIndex'])] = bin_blob\n return id_lookup", "def make_cache_keys(self, identifiers):\n\n raise NotImplementedError", "def test_multidb_fetch_by_id(self):\r\n with self.settings(FETCH_BY_ID=True):\r\n assert Addon.objects.get(id=1).from_cache is False\r\n assert Addon.objects.get(id=1).from_cache is True\r\n\r\n from_slave = Addon.objects.using('slave').get(id=1)\r\n assert from_slave.from_cache is False\r\n assert from_slave._state.db == 'slave'", "def __init__(self, obj):\n self.obj = obj\n 
self._pkcache = {}\n self._idcache = obj.__class__.__instance_cache__\n self._typecache = defaultdict(dict)\n self.init()", "def from_cache(self, cache_key=None, pk=None):\n if pk:\n cache_key = self._cache_key(pk)\n # if cache_key is none, the mangler will generate a MD5 from the query\n return FromCache(self.label, cache_key)", "def __init__(self, cachefile=None):\n self.cache = dict()\n self.sites = []\n if cachefile:\n try:\n with open(cachefile, 'rb') as cf:\n saved_sites = pickle.load(cf)\n for sitename, popularity, latency, content in saved_sites:\n if content is None: continue\n self.cache_site(sitename, popularity, content, latency)\n except Exception as e:\n print('Failed to open cachefile \"{}\": {}'.format(cachefile, e), file=sys.stderr)", "def from_list(cls, ticker_list, start, end, get_ohlcv=False,\n get_fundamentals=False):\n\n if get_fundamentals:\n cls._init_spiders(ticker_list=ticker_list, start_date=start,\n end_date=end)\n\n with db.transactional_session() as session:\n for ticker in ticker_list:\n session.add(cls(ticker=ticker, start_date=start, end_date=end,\n get_ohlcv=get_ohlcv,\n get_fundamentals=get_fundamentals))", "def getJsonDbFactory(emailOptions, perfToTrackOptions, JsonableRecordClass, savedFilesTracker):\n keyFunc = lambda x: ((-1 if perfToTrackOptions.isLargerBetter else 1)*getattr(x,perfToTrackOptions.perfAttrName))\n JsonableRecordsHolderClass = jsondb.getSortedJsonableRecordsHolderClass(keyFunc=keyFunc); \n #the metadata callbacks are: print if there's a new best, and also save\n #the best performing model.\n metadataCallbacks = [getPrintIfNewBestCallback()];\n if emailOptions is not None and emailOptions.emailMode in [EmailModes.allEmails, EmailModes.errorsAndNewBest]:\n metadataCallbacks.append(getEmailIfNewBestCallback(emailOptions, perfToTrackOptions));\n callbacks_beforeAdd = [ renameFilesWithRecordNumberCallback(savedFilesTracker)\n , getSaveBestFilesCallback(perfToTrackOptions, savedFilesTracker)\n , getSaveSomeFilesCallback(perfToTrackOptions, savedFilesTracker)];\n callbacks_afterAdd = [getPrintAddedRecordCallback()]\n if (emailOptions is not None and emailOptions.emailMode in [EmailModes.allEmails]):\n callbacks_afterAdd.append(getEmailRecordAddedCallback(emailOptions)); \n\n MetadataClass = jsondb.getUpdateValsMetadataClass(\n [jsondb.MetadataUpdateInfo(\n metadataAttrName=perfToTrackOptions.perfAttrName\n ,recordAttrName=perfToTrackOptions.perfAttrName\n ,updateFunc=getBestUpdateFunc(\n isLargerBetter=perfToTrackOptions.isLargerBetter\n ,metadataCallbacks=metadataCallbacks)\n ,initVal=None)\n ,jsondb.NumRecordsMetadataUpdateInfo]\n ,[RunTrackerMetadataFields.bestPerfSavedFiles]); \n jsonDbFactory = jsondb.JsonDb.getFactory(JsonableRecordClass=JsonableRecordClass\n ,JsonableRecordsHolderClass=JsonableRecordsHolderClass\n ,MetadataClass=MetadataClass\n ,callbacks_beforeAdd=callbacks_beforeAdd\n ,callbacks_afterAdd=callbacks_afterAdd); \n return jsonDbFactory;", "def fake_db() -> Callable[[None], FakeRedis]:\n @lru_cache\n def wrapper() -> FakeRedis:\n db = FakeRedis(decode_responses=True)\n return db\n\n return wrapper", "def fetch_objects(cache_key_f, get_database_f, item_keys):\r\n item_key_to_item = get_many_by_key(cache_key_f, item_keys)\r\n \r\n for item_key in item_keys:\r\n if item_key not in item_key_to_item:\r\n # failed to get the item from the cache\r\n try:\r\n # have to get each item individually to cache the query\r\n item = get_database_f(item_key)\r\n item_key_to_item[item_key] = item\r\n except ObjectDoesNotExist:\r\n pass\r\n 
\r\n return item_key_to_item", "def __init__(self,db,tables=[]):\n #{{{ Load class and test databases\n self.dbcentral = db\n self.tables = tables\n self.debug = config.debug\n self.null_vals = defaultdict(lambda: defaultdict(dict))\n\n \"\"\"\n Load values from databases\n \"\"\"\n self._get_nulls()", "def create_proxy_dict() -> DictProxy:\n manager = new_manager()\n cache_ids = manager.dict() # type: DictProxy\n return cache_ids", "def cache(cls):\n return Cache(cls, cls.cache_regions, cls.cache_label)" ]
[ "0.6530035", "0.5592927", "0.5449668", "0.54129845", "0.5390788", "0.5236379", "0.52326137", "0.52017933", "0.5083397", "0.50474405", "0.49929607", "0.4948093", "0.49268007", "0.48906374", "0.48458242", "0.48337904", "0.4833684", "0.48185053", "0.481795", "0.4808004", "0.47927582", "0.47793254", "0.47570118", "0.4744562", "0.4722962", "0.4712565", "0.47098136", "0.47068498", "0.46908027", "0.46823695" ]
0.6934165
0
Factory method to create a fake data source This refers to a static data file that is in the current package. This function exists for testing purposes as it avoids network traffic to get the actual up-to-date ID mapping.
def create_fake(cls): source = pkg_resources.open_text('baseball_id', 'sample.master.csv', encoding='iso-8859-1') c = lookup.Cache(source) return c
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_data_source_soaps_id_dynamic_datas_get(self):\n pass", "def init_locally_processed_dataset(directory, source_datasets, uuid_=None):\n md = ptype.DatasetMetadata(\n id_=uuid_,\n # Default creation time is creation of an image.\n creation_dt=datetime.datetime.utcfromtimestamp(directory.stat().st_ctime),\n lineage=ptype.LineageMetadata(\n machine=ptype.MachineMetadata(\n hostname=socket.getfqdn(),\n runtime_id=_RUNTIME_ID,\n uname=' '.join(os.uname())\n ),\n source_datasets=source_datasets\n )\n )\n _note_package_vers(md)\n return md", "def _generate_data(self, codec='deflate'):\n _logger.info('generating fake data')\n (desc, path) = mkstemp()\n os.close(desc)\n os.remove(path)\n try:\n call([\n 'node', osp.join(DPATH, os.pardir, os.pardir, 'scripts', 'random'),\n self.path, str(self.n_records), path\n ])\n yield path\n finally:\n if osp.exists(path):\n os.remove(path)", "def init_existing_dataset(directory, source_datasets, uuid_=None, source_hostname=None):\n md = ptype.DatasetMetadata(\n id_=uuid_,\n # Default creation time is creation of an image.\n creation_dt=datetime.datetime.utcfromtimestamp(directory.stat().st_ctime),\n lineage=ptype.LineageMetadata(\n machine=ptype.MachineMetadata(\n hostname=source_hostname\n ),\n source_datasets=source_datasets\n )\n )\n _note_package_vers(md)\n return md", "def test_dataloader(self) -> DataLoader:\n return self._custom_data_loader()", "def fixture_example_data():\n import_example_data()", "def code_builder(request, tmp_path_factory) -> dataset_builder.DatasetBuilder:\n tmp_path = tmp_path_factory.mktemp('tfds_datasets') # Temporary data_dir\n builder_cls = request.param\n # Generate the dataset (only once for all tests as scope == 'module').\n builder = builder_cls(data_dir=tmp_path)\n builder.download_and_prepare()\n\n # Update the default DATA_DIR during the test.\n with mock.patch.object(constants, 'DATA_DIR', str(tmp_path)):\n yield builder", "def setup_dummy_data_manager():\n import repoze.filesafe\n repoze.filesafe._local.manager = mgr = DummyDataManager()\n return mgr", "def load_data_source(data_source):\n source_module = __import__('source_'+data_source)\n get_source = getattr(source_module, 'get_source')\n return get_source()", "def fixture_retrieved():\n from aiida.plugins import DataFactory\n from aiida_logger.tests import TEST_DIR\n\n retrieved = DataFactory('folder')()\n retrieved.put_object_from_tree(path=os.path.join(TEST_DIR, 'input_files'))\n\n return retrieved", "def dataset_initialize(self, folder):\r\n if not os.path.isdir(folder):\r\n raise ValueError('Invalid folder: ' + folder)\r\n\r\n ref = self.config_values[self.CONFIG_NAME_USER] + '/INSERT_SLUG_HERE'\r\n licenses = []\r\n default_license = {'name': 'CC0-1.0'}\r\n licenses.append(default_license)\r\n\r\n meta_data = {\r\n 'title': 'INSERT_TITLE_HERE',\r\n 'id': ref,\r\n 'licenses': licenses\r\n }\r\n meta_file = os.path.join(folder, self.DATASET_METADATA_FILE)\r\n with open(meta_file, 'w') as f:\r\n json.dump(meta_data, f, indent=2)\r\n\r\n print('Data package template written to: ' + meta_file)\r\n return meta_file", "def makeIdFactory(self, dataRef):\n # With the default configuration, this IdFactory doesn't do anything, because\n # the IDs it generates are immediately overwritten by the ID from the reference\n # catalog (since that's in config.measurement.copyColumns). 
But we create one here anyway, to\n # allow us to revert back to the old behavior of generating new forced source IDs,\n # just by renaming the ID in config.copyColumns to \"object_id\".\n expBits = dataRef.get(self.config.coaddName + \"CoaddId_bits\")\n expId = int(dataRef.get(self.config.coaddName + \"CoaddId\"))\n return lsst.afw.table.IdFactory.makeSource(expId, 64 - expBits)", "def registerSampleData():\n # It is always recommended to provide sample data for users to make it easy to try the module,\n # but if no sample data is available then this method (and associated startupCompeted signal connection) can be removed.\n\n import SampleData\n iconsPath = os.path.join(os.path.dirname(__file__), 'Resources/Icons')\n\n # To ensure that the source code repository remains small (can be downloaded and installed quickly)\n # it is recommended to store data sets that are larger than a few MB in a Github release.\n\n # RegularizedFastMarching1\n SampleData.SampleDataLogic.registerCustomSampleDataSource(\n # Category and sample name displayed in Sample Data module\n category='RegularizedFastMarching',\n sampleName='RegularizedFastMarching1',\n # Thumbnail should have size of approximately 260x280 pixels and stored in Resources/Icons folder.\n # It can be created by Screen Capture module, \"Capture all views\" option enabled, \"Number of images\" set to \"Single\".\n thumbnailFileName=os.path.join(iconsPath, 'RegularizedFastMarching1.png'),\n # Download URL and target file name\n uris=\"https://github.com/Slicer/SlicerTestingData/releases/download/SHA256/998cb522173839c78657f4bc0ea907cea09fd04e44601f17c82ea27927937b95\",\n fileNames='RegularizedFastMarching1.nrrd',\n # Checksum to ensure file integrity. Can be computed by this command:\n # import hashlib; print(hashlib.sha256(open(filename, \"rb\").read()).hexdigest())\n checksums = 'SHA256:998cb522173839c78657f4bc0ea907cea09fd04e44601f17c82ea27927937b95',\n # This node name will be used when the data set is loaded\n nodeNames='RegularizedFastMarching1'\n )\n\n # RegularizedFastMarching2\n SampleData.SampleDataLogic.registerCustomSampleDataSource(\n # Category and sample name displayed in Sample Data module\n category='RegularizedFastMarching',\n sampleName='RegularizedFastMarching2',\n thumbnailFileName=os.path.join(iconsPath, 'RegularizedFastMarching2.png'),\n # Download URL and target file name\n uris=\"https://github.com/Slicer/SlicerTestingData/releases/download/SHA256/1a64f3f422eb3d1c9b093d1a18da354b13bcf307907c66317e2463ee530b7a97\",\n fileNames='RegularizedFastMarching2.nrrd',\n checksums = 'SHA256:1a64f3f422eb3d1c9b093d1a18da354b13bcf307907c66317e2463ee530b7a97',\n # This node name will be used when the data set is loaded\n nodeNames='RegularizedFastMarching2'\n )", "def mock_legacy_dataset(mock_dataset_with_cache_dir):\n archive_path = os.path.join(resource_filename('gtmcore.dataset.tests', 'data'), 'test-legacy-dataset.zip')\n temp_path = os.path.join(tempfile.gettempdir(), 'test-legacy-dataset.zip')\n shutil.copyfile(archive_path, temp_path)\n conf_file = mock_dataset_with_cache_dir[0].client_config.config_file\n import_dataset_from_zip(archive_path=temp_path, username=USERNAME,\n owner=USERNAME, config_file=conf_file)\n\n im = InventoryManager()\n ds = im.load_dataset(USERNAME, USERNAME, 'test-legacy-dataset')\n m = Manifest(ds, USERNAME)\n\n # yield dataset, manifest, working_dir\n yield ds, m, mock_dataset_with_cache_dir[1]", "def F(f):\n return datafile(f, __name__)", "def create_dataset(opt):\n\tdata_loader = 
CustomDatasetDataLoader(opt)\n\tdataset = data_loader.load_data()\n\treturn dataset", "def __fake_data__(self):\n\n # Set directory for configuration files\n self.configFilePath = q.system.fs.joinPaths(q.dirs.varDir, 'tftproot')\n \n # Add some share's\n for i in xrange(3):\n share = NFSShare()\n share.name = 'share-%s' % q.base.idgenerator.generateRandomInt(0, 255)\n self.shares[share.name] = share", "def create_dataset(opt):\n data_loader = CustomDatasetDataLoader(opt)\n dataset = data_loader.load_data()\n return dataset", "def _test_template_data(self):\n chars=string.ascii_uppercase + string.digits\n id = ''.join(random.choice(chars) for x in range(6))\n\n return {\n 'test_module': self.test_modulename(),\n 'driver_module': self.driver_modulename(),\n 'driver_dir': self.driver_dir(),\n 'file': self.driver_relative_path(),\n 'author': self.metadata.author,\n 'driver_name': self.metadata.driver_name,\n 'constructor': self.metadata.constructor,\n 'full_instrument_lower': self.metadata.driver_name.lower(),\n 'full_instrument_camelcase': self.driver_name_camelcase(),\n }", "def init_static_data(log_to_console=False):\n # These are annoyingly necessary to live in the DB, currently. \n # Really this should be app logic, I think.\n load_report_types()\n load_roles()\n loc_file = getattr(settings, \"STATIC_LOCATIONS\")\n if loc_file:\n load_locations(loc_file, log_to_console=log_to_console)\n product_file = getattr(settings, \"STATIC_PRODUCTS\")\n if product_file:\n load_products(product_file, log_to_console=log_to_console)", "def autogen_dataset_dir():\n return TabularDataset.autogen('tests/data/dummy_tabular',\n seed=42,\n sep=',')", "def prepare_dataset(fpath):\n raise NotImplementedError", "def __init__(self, data_source_identifier, verbose=True):\n pass", "def dir_dataset():\n return TabularDataset.from_path('tests/data/dummy_tabular', sep=',')", "def getFake(directory=\"../FakeRealNews/Data\"):\r\n return pd.read_csv(directory + \"/Fake.csv\")", "def test_data_source_soaps_id_dynamic_datas_post(self):\n pass", "def __init__(self, table, ioloop, iex_source, **kwargs):\n data_cleaner = kwargs.pop(\"data_cleaner\")\n super(IEXStaticDataSource, self).__init__(\n table, ioloop, data_cleaner=data_cleaner\n )\n self._iex_source = iex_source\n self._iex_source_kwargs = kwargs", "def autogen_dataset_dir_with_test():\n return TabularDataset.autogen('tests/data/dummy_tabular',\n test_path='tests/data/dummy_tabular_test',\n seed=42,\n sep=',')", "def mock_dataset_with_cache_dir():\n conf_file, working_dir = _create_temp_work_dir()\n with patch.object(Configuration, 'find_default_config', lambda self: conf_file):\n im = InventoryManager(conf_file)\n ds = im.create_dataset(USERNAME, USERNAME, 'dataset-1', description=\"my dataset 1\",\n storage_type=\"gigantum_object_v1\")\n\n yield ds, working_dir, ds.git.repo.head.commit.hexsha\n shutil.rmtree(working_dir)", "def test_data_source_soaps_id_get(self):\n pass" ]
[ "0.6308188", "0.62671566", "0.6175177", "0.61343235", "0.5995582", "0.5915337", "0.59063405", "0.58865434", "0.5807432", "0.57975435", "0.5794116", "0.57484156", "0.5740665", "0.56791466", "0.56658155", "0.56552786", "0.5642972", "0.56229484", "0.5622516", "0.56089175", "0.56014436", "0.5600478", "0.5591214", "0.5575909", "0.55355436", "0.55352795", "0.5533836", "0.55194515", "0.5509626", "0.5500959" ]
0.70590085
0
The extracter moves files. Arguments input_folder and output_folder are set through the GUI. Based on the values in the column called column_name in the spreadsheet, files are copied from input_folder to output_folder. Here, these are the gilbert_numbers in the spreadsheet fed from main(). They are matched to the file names. Each gilbert_number gets its own directory in the output_folder. output_folder should be empty, or at least not already contain the same gilbert_numbers. Also copies all speaker files from input_folder to output_folder.
def extracter(spreadsheet, column_name): print header, "Running the extracter." root=Tkinter.Tk() root.withdraw() root.update() input_folder=tkFileDialog.askdirectory(title="Inputfolder: Please choose a directory that contains your corpus files") root=Tkinter.Tk() root.withdraw() root.update() output_folder=tkFileDialog.askdirectory(title="Outputfolder: Please choose a directory to copy files into") print header, "Copying files from '{}' to '{}'.".format(input_folder, output_folder) #collecting input files inputfiles=[] print "Locating files." for dirpath, subdirs, files in os.walk(input_folder): for f in files: inputfiles.append(os.path.join(dirpath, f)) if len(inputfiles) in [1000,2000,4000,8000,1600,24000]: print "{} files processed, still working.".format(len(inputfiles)) print "Found {} files.".format(len(inputfiles)) #read from spreadsheet # with open(spreadsheet, "r") as spreadsheet: # spreadsheet=pandas.read_csv(spreadsheet, encoding="utf-8") numbers_to_be_extracted= spreadsheet[column_name].unique() print header, "Gilbert numbers to be extracted:" print ",".join([unicode(i) for i in numbers_to_be_extracted]) #copying speaker files print header, "Copying speaker files." speakerfiles=[f for f in inputfiles if re.match(".*\.txt", os.path.split(f)[1])] os.mkdir(os.path.join(output_folder, "speakers")) for s in speakerfiles: shutil.copy2(s, os.path.join(output_folder, "speakers")) #finding relevant input files result=[] for number in numbers_to_be_extracted: print "Processing {}, creating folder '{}'.".format(number, number) os.mkdir(os.path.join(output_folder, unicode(number))) regex="(\d+)-(\d+)-(\d+)-"+number.astype('U')+"-(\D+)\.wav" findings= [f for f in inputfiles if re.match(regex, os.path.split(f)[1])] result= result+findings for find in findings: shutil.copy2(find, os.path.join(output_folder, unicode(number), os.path.split(find)[1])) print header, "{} files have been copied to {}.".format(len(result), output_folder)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def form_sample_folder(self, input_folder, target_folder, sample_name):\n print(f'processing {sample_name} folder.')\n # first make a subfolder to contain the images - e.g. 'target_folder/sample_name'\n sample_dir = join(target_folder, sample_name)\n if not os.path.exists(sample_dir):\n mkdir(sample_dir)\n # resize and move the mask images - e.g. 'target_folder/sample_name/imgs_necrosis.png'\n img_file_nec = join(input_folder, 'Necrosis',\n 'Tissue Slides.'+sample_name+'.png')\n img_res = self.process_img(img_file_nec, self.rescale_ratio)\n img_nec = img_res.copy()\n cv2.imwrite(join(sample_dir, 'necrosis.png'), img_res)\n\n img_file_perf = join(input_folder, 'Perfusion',\n 'Tissue Slides.'+sample_name+'.png')\n img_res = self.process_img(img_file_perf, self.rescale_ratio)\n cv2.imwrite(join(sample_dir, 'perfusion.png'), img_res)\n\n # resize and move the maker HE and EF5 images\n files = listdir(input_folder)\n img_files = [x for x in files if x.split(\n '.')[-1] in ('tif', 'jpg', 'png')]\n for img_file in img_files:\n if (sample_name+'_' in img_file) or (sample_name+'-' in img_file):\n if ('HE-G' in img_file) or ('HE-green' in img_file) or ('HEgreen' in img_file):\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n if not os.path.exists(join(sample_dir, 'HE-green.png')):\n cv2.imwrite(join(sample_dir, 'HE-green.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n elif ('HE-R' in img_file) or ('HE-red' in img_file) or ('HEred' in img_file):\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n if not os.path.exists(join(sample_dir, 'HE-red.png')):\n cv2.imwrite(join(sample_dir, 'HE-red.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n elif ('HE-B' in img_file) or ('HE-blue' in img_file) or ('HE-blue' in img_file):\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n if not os.path.exists(join(sample_dir, 'HE-blue.png')):\n cv2.imwrite(join(sample_dir, 'HE-blue.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n elif 'EF5' in img_file:\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n img_ef5 = img_res.copy()\n if not os.path.exists(join(sample_dir, 'EF5.png')):\n cv2.imwrite(join(sample_dir, 'EF5.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n\n masked_ef5 = (img_ef5 * (img_nec <= 0)).astype(img_ef5.dtype)\n cv2.imwrite(join(sample_dir, 'EF5_masked.png'), masked_ef5)\n assert len(listdir(sample_dir)) == 7\n return", "def main(inputfolder):\n inputfolder = realpath(inputfolder)\n for data in DATASET:\n for fol in FOLDERS:\n actfile = join(inputfolder, data, data+'.txt')\n logger.info('Changing data in: %s' % actfile)\n filedata = []\n with open(actfile) as fin:\n for line in fin:\n id, y = map(int, line.strip().split('\\t'))\n if y == -1000:\n y = 0\n path = join(inputfolder, 'data'+str(data), action, 'original', str(id)+'.jpg')\n filedata.append((path, y))\n path = join(inputfolder, 'data'+str(data), action, 'original', str(id+1)+'.jpg')\n filedata.append((path, y))\n with open(actfile, 'w') as fout:\n for path, y in filedata:\n fout.write('%s %d\\n' % (path, y))", "def move_generators_to_input(self, generator_folder_glob):\n spawn_folder_names = []\n generator_folders = glob(generator_folder_glob)\n for i, folder in enumerate(generator_folders):\n base_name = 
'e01s{:02d}_{}f0000'.format(i + 1, os.path.basename(folder))\n input_destination = os.path.join(self.input_folder, base_name)\n data_destination = os.path.join(self.data_folder, base_name)\n create_folder(input_destination)\n create_folder(data_destination)\n spawn_folder_names.append(input_destination)\n create_symlinks(\n files=os.path.join(folder, '*'),\n dst_folder=os.path.relpath(input_destination)\n )\n return spawn_folder_names", "def binder(folder_name: str, output_name: str = \"output.exe\", verbose=True):\n\n # we get all the files from the given folder\n files: List[str] = os.listdir(folder_name)\n\n if files == []:\n print(\" No file in \", folder_name, \" folder\")\n return\n\n # we sort then by comparing the concatenated number\n files = sorted(files, key=lambda x: int(x.split(\"_\")[0]))\n\n if verbose:\n print(\"encoutered {} files:\".format(len(files)))\n for file in files:\n print(file)\n\n # we open an output stream\n with open(output_name, \"wb+\") as output_stream:\n # And for every gathered files\n for file in files:\n with open(os.path.join(folder_name, file), \"rb\") as input:\n # we add it at the end of the document\n output_stream.write(input.read())\n\n print(\"Done!\")", "def main():\r\n parent_dir = 'D:\\\\Profession\\\\Intern\\\\Assignments\\\\Codes\\\\Assignement Codes\\\\Part 2\\\\data_dumps'\r\n\r\n if not (os.path.isdir(parent_dir)):\r\n raise Exception(\"The directory doesn't exist\")\r\n\r\n directories = []\r\n\r\n for directory in os.listdir(parent_dir):\r\n directories.append(os.path.join(parent_dir, directory))\r\n\r\n # The group_dic represents the dictionary with keys equal to the unique dates in the directories\r\n # And the values represent a list of all files that have the same date prefix across the data_dumps\r\n group_dic = grouping(directories)\r\n\r\n # Moving Files into New Directory\r\n move(group_dic, parent_dir)\r\n print(\"Files Moved Successfully\")", "def jarvis(input_path, output_path): \n\n if not os.path.exists(f'{output_path}'):\n os.makedirs(f'{output_path}')\n\n file_list = [filename for filename in os.listdir(f'{input_path}') if '.tif' in filename]\n\n for filename in file_list:\n pathname = os.path.join(input_path, filename)\n new_name = f\"{output_path}{filename.replace('.lif - ', '_').replace('_5x-', '_')}\"\n copyfile(pathname, new_name)\n logger.info(f'{new_name}')", "def __concatonate_files(self, new_file_name, parent_folder):\n\n # make the output directory\n output_file = self.save_directory + \"/\" + new_file_name\n\n # check if save_directory exists\n if not os.path.exists(self.save_directory):\n try:\n # make the directory\n os.makedirs(self.save_directory)\n except PermissionError:\n # if the user is unable to write to this directory, we should not continue\n print(\"You do not have the correct permissions for creating a directory here. 
Please try again.\")\n exit(-1)\n\n barcode_files = []\n for root, directory, files in os.walk(parent_folder):\n # we need to know where each file is in the barcode folder so we can read data from it\n for name in files:\n barcode_files.append( os.path.join(root, name) )\n\n with open(output_file, 'w') as writer:\n for name in barcode_files:\n with open(name, 'r') as reader:\n for line in reader:\n writer.write(line)", "def main():\n # file path to csv file\n filePath = r\"C:\\Users\\DSPLab\\Research\\IAPSdata\\IAPS_selectedList_Final.csv\"\n # Get targeted List of picture number\n fileNameList = importSelectedList(filePath)\n\n # Check duplicated item in the list\n print(pd.Series(fileNameList)[pd.Series(fileNameList).duplicated()].values)\n print(len(set(fileNameList)))\n \n # Copy all the selected picture to the targeted folder\n for i in fileNameList:\n # Declare src and dest \n src = r\"C:\\Users\\DSPLab\\Research\\IAPSdata\\IAPS 1-20 Images\\\\\" + str(i) + r\".jpg\"\n dest = r\"C:\\Users\\DSPLab\\Research\\IAPSdata\\IAPS 1-20 Images\\\\Sample_final\\\\\" + str(i) + r\".jpg\"\n copyFile(src,dest)", "def start():\r\n\r\n total_files = sum([len(files) for r, d, files in os.walk(abs_source_directory)])\r\n total_files_down = total_files\r\n for i in range(total_files, 0, -1):\r\n if i % 10 == 0:\r\n total_files_down = i\r\n break\r\n current_iteration = 0\r\n last_factor = 0\r\n position = 1\r\n print(\"[{0}] {1}/{2}\".format(\" \" * 10, 0, total_files))\r\n for path, dirs, files in os.walk(abs_source_directory):\r\n for file_name in list(filter(lambda x: x.endswith(\".pdf\"), files)):\r\n file_source_path = os.path.join(path, file_name)\r\n out = re.search(normal_regex, file_source_path)\r\n # Handles normal past-papers\r\n try:\r\n found_groups = out.groups()\r\n write_copy(file_source_path, file_name, matched_groups=found_groups)\r\n except AttributeError:\r\n # Handles music past-papers\r\n if \"Music_\" in file_source_path:\r\n out = re.search(audio_music_regex, file_source_path)\r\n try:\r\n found_groups = out.groups()\r\n write_copy(file_source_path, file_name, music_groups=found_groups)\r\n except AttributeError:\r\n print(f\"CRITICAL ERROR: File not handled: {file_source_path}\")\r\n elif \"Exam Pack list of omitted papers and markschemes\" in file_name:\r\n pass\r\n else:\r\n print(f\"CRITICAL ERROR: File not handled: {file_source_path}\")\r\n current_iteration += 1\r\n if current_iteration == last_factor + total_files_down / 10:\r\n last_factor = current_iteration\r\n print(\"[{0}{1}] {2}/{3}\".format(\"-\" * position, \" \" * (10 - position), current_iteration, total_files))\r\n position += 1\r\n # Handles mp3 files\r\n for file_name in list(filter(lambda x: x.endswith(\".mp3\"), files)):\r\n file_source_path = os.path.join(path, file_name)\r\n out = re.search(audio_music_regex, file_source_path)\r\n try:\r\n found_groups = out.groups()\r\n write_copy(file_source_path, file_name, audio_groups=found_groups)\r\n except AttributeError:\r\n print(f\"CRITICAL ERROR: File not handled: {file_source_path}\")\r\n current_iteration += 1\r\n if current_iteration == last_factor + total_files_down / 10:\r\n last_factor = current_iteration\r\n print(\"[{0}{1}] {2}/{3}\".format(\"-\" * position, \" \" * (10 - position), current_iteration, total_files))\r\n position += 1\r\n print(\"[{0}] {1}/{2}\".format(\"-\" * 10, total_files, total_files))", "def genes_file_creation(input_folder):\n file_paths = {}\n for file_name in os.listdir(input_folder):\n file_paths[file_name] = input_folder + '/' 
+ file_name\n\n df = pa.DataFrame()\n \n for file_name in file_paths:\n df_temp = pa.read_csv(file_paths[file_name], sep='\\t', header=None)\n print(df_temp.columns)\n gene_column = 0\n df_temp = df_temp[[gene_column]]\n df_temp.columns = ['Gene_Name_DE']\n row = []\n file_extension = os.path.splitext(file_name)[1]\n row.append(file_name.replace(file_extension, \"\"))\n row.extend(df_temp['Gene_Name_DE'].tolist())\n df = df.append([row], ignore_index=True)\n\n df.insert(1, 'Description', 'Genes_DE')\n\n df.to_csv('DE_gene.gmt', sep='\\t', index=False, header=False)", "def read_input_txt_file(self, inputfile, outputfolder):\n\n # set output folder from sys argv and append \\\\\n self.outputfolder = outputfolder + \"\\\\\"\n\n with open(inputfile, 'r') as file2open:\n # for each line split into columns\n for line in file2open:\n #split line on tab\n splitline=line.split('\\t')\n \n \n # check if any empty lines, or fields are present in input file. do not check prefix (last element in list)\n if '' in splitline[0:7]:\n raise ValueError(\"\\nError in the input file! \\nHave you used Excel?!?!?! \\n\\\n Please open in notepad and ensure there are no blank lines and all fields are present\")\n \n # assign each value to a variable\n # barcode, subarray (numeric), dye and scan number for file 1\n file1_barcode=splitline[0]\n file1_subarray=int(splitline[1])\n file1_dye=splitline[2]\n file1_scan_number=splitline[3]\n \n # barcode, subarray (numeric), dye and scan number for file 2\n file2_barcode=splitline[4]\n file2_subarray=int(splitline[5])\n file2_dye=splitline[6]\n file2_scan_number=splitline[7].rstrip()\n \n \n # a prefix can be added to as the last column, which is added to the start of the output filename (len(splitline) == 9)\n if len(splitline)==9: \n # capture prefix and remove newline\n out_file_prefix=splitline[8].rstrip()\n #check the prefix is not empty\n assert len(out_file_prefix)!= 0,\"Prefix column is empty, were you trying to add a prefix??!\"\n \n #and append an underscore to help later.\n out_file_prefix=out_file_prefix+\"_\"\n # if no prefix specified\n else:\n out_file_prefix=None\n \n # check the given subarray values are valid. 
if they are not the text value will not be returned from the dictionary\n assert file1_subarray in self.subarray_dict, \"the given subarray for the Cy3 sample is invalid (\"+str(file2_subarray)+\")(must be a number 1-8)\"\n assert file2_subarray in self.subarray_dict, \"the given subarray for the Cy5 sample is invalid (\"+str(file2_subarray)+\")(must be a number 1-8)\"\n \n # convert the given subarray (an integer 1-8 - the keys in self.subarray_dict) into the string used in the file name (the values in self.subarray_dict)\n file1_subarray=self.subarray_dict[file1_subarray]\n file2_subarray=self.subarray_dict[file2_subarray]\n \n\n # concatenate barcode, scan number and subarray text string to create a filename pattern to search for\n filename1 = str(file1_barcode) + \"_S0\"+file1_scan_number+\"*\" + file1_subarray\n filename2 = str(file2_barcode) + \"_S0\"+file2_scan_number+\"*\" +file2_subarray\n\n # append to a list\n self.files_to_find.append((filename1, file1_dye, filename2, file2_dye,out_file_prefix))", "def __concatonate_files_controller(self):\n\n # find all barcode file paths\n barcode_directories = []\n for root, directory, files in os.walk(self.input_directory):\n for name in directory:\n barcode_directories.append( os.path.join(root, name) )\n\n # iterate through each barcode directory, item is the file path\n for item in barcode_directories:\n file = os.listdir(item)[0]\n path = item\n\n new_file_name = self.__return_new_file_name(file_name=file, file_path=path)\n self.__concatonate_files(new_file_name=new_file_name, parent_folder=path)\n self.__write_logs_to_file(new_file_name)", "def main(input_folder, output_images_folder, output_files_folder, bb_file,\n archive_folder, name_mapping):\n\n output_images_folder = Path(output_images_folder)\n output_files_folder = Path(output_files_folder)\n archive_folder = Path(archive_folder)\n output_images_folder.mkdir(exist_ok=True)\n archive_folder.mkdir(exist_ok=True)\n logger.info(\"Converting Dicom to Nifty - START\")\n converter = NiftiConverter(\n padding=\"whole_image\",\n resampling_spacing=-1,\n list_labels=[\"GTVt\"],\n cores=10,\n )\n _ = converter(input_folder, output_folder=output_images_folder)\n\n logger.info(\"Converting Dicom to Nifty - END\")\n logger.info(\"Removing extra VOI - START\")\n move_extra_vois(output_images_folder, archive_folder)\n logger.info(\"Removing extra VOI - END\")\n logger.info(\"Renaming files- START\")\n correct_names(output_images_folder, name_mapping)\n logger.info(\"Renaming files- END\")\n logger.info(\"Cleaning the VOIs - START\")\n clean_vois(output_images_folder)\n logger.info(\"Cleaning the VOIs - END\")\n\n logger.info(\"Computing the bounding boxes - START\")\n bb_df = compute_bbs(output_images_folder)\n bb_df.to_csv(bb_file)\n logger.info(\"Computing the bounding boxes - END\")", "def main_one(string_path_to_folder, destination_folder):\n # .jpg and .JPG are the same\n # photos = glob.glob(\"C:/Personal/pp2_photo/dataBase/*.JPG\") # Examples of location format\n # pho = glob.glob(\"C:/Personal/pp2_photo/dataBase/*.jpg\")\n photos = glob.glob(string_path_to_folder+\"/*.JPG\")\n print(\"Number of files: \", len(photos))\n for k in photos:\n print(get_photo_date(k))\n process_all(k, destination_folder)", "def batch_preprocess(self, input_folder, output_folder, padding=20):\n input_files = glob.glob(input_folder + '/*')\n for input_path in input_files:\n subject_name = re.search(self.KEY_WORD_FILE, input_path).group()\n output_path = output_folder + '/' + subject_name\n\n data, options = 
nrrd.read(input_path)\n data, options = self.pad_upper(data, options, padding)\n data, options = self.filter_background_to_air(data, options)\n\n print 'write ' + output_path\n nrrd.write(output_path, data, options) # too slow in Python", "def prepare_output_dir(out_dir, test_dir):\r\n\r\n if not out_dir.exists():\r\n out_dir.mkdir()\r\n\r\n # get the necessary file names\r\n file_names = get_file_names(test_dir, args.distance, print_file_names=False)\r\n\r\n # copy the images in the firstIms into the output folder\r\n for name in file_names[1][0]:\r\n file_path = Path(test_dir / name)\r\n copy_to = Path(out_dir / name)\r\n shutil.copy(file_path, copy_to)\r\n\r\n # the firstIms list does not contain the last image,\r\n # so we need to also copy the last image of the secIms into the output folder\r\n last_im = file_names[1][1][-1]\r\n shutil.copy(Path(test_dir/last_im), Path(out_dir/last_im))\r\n\r\n return file_names", "def findfif2move(self, source, destination, foldername):\n import glob\n import shutil\n\n os.chdir(source)\n mainfolders = os.listdir(u'.')\n\n for fname in mainfolders:\n try:\n if fname[:2] == foldername:\n subjectdir = os.path.join(source, fname)\n os.chdir(subjectdir)\n subfolders = os.listdir(u'.')\n \n # for each subject in the provided subfolders \n for s in subfolders:\n if s[0] == 's':\n sessiondir = os.path.join(subjectdir, s)\n os.chdir(sessiondir)\n file = glob.glob(\"*.fif\") # find files to move\n\n for files in file: \n shutil.copy(os.path.join(sessiondir,files),\n destination + fname[1:])\n except Exception:\n print(\"Something went wrong while copying the data >>>\", fname)\n pass\n os.chdir(source)", "def main():\n # checking the directory\n cwd = os.getcwd()\n print(f'The working directory: {cwd}')\n # counting time \n start_time = time.process_time()\n # passing args\n arg = parse_arguments()\n sub_dir = arg.sub_dir\n dir_out = arg.dir_out\n file_amb = 'csv_to_clean'\n names_ambigous = defaultdict(str)\n with open(file_amb, 'r') as fh:\n for line in fh:\n name = line.strip().split('/')[2]\n names_ambigous[name] = names_ambigous.get(name, '')\n names_ambigous[name] += line.strip()\n print(f'number files: {len(names_ambigous)}')\n # checking if the output directory exist\n # if not make it\n f_pwd = os.path.join('Results', 'kmer_counts')\n # get the genus names\n cnt = 0\n for name, filename in names_ambigous.items():\n cleaned = get_csv_clean(filename)\n full_path = os.path.join(f_pwd, name)\n if os.path.exists(full_path):\n print(f'The path {full_path} exist')\n pass\n else:\n os.makedirs(full_path)\n csv_name = f'{full_path}/{name}_k2_8_chr.csv'\n print(f'Checking the full path {csv_name}')\n with open(csv_name, 'w') as fout:\n for km, cn in cleaned.items():\n fout.write(f'{km},{cn}\\n')\n cnt += 1\n # get final time of the script\n end = time.process_time()\n total_time = end - start_time\n print(f'The script takes {total_time} to finish!')\n print(f'Where read and manipulated {cnt} files')\n print('Done!')", "def move_to_folder(folder = \"output\"):\n\n for files in os.listdir(os.getcwd()):\n if files.endswith(\".tcl\") or files.endswith(\".pdb\") or files.endswith(\".fasta\"):\n new_file = folder + \"/\" + files\n os.rename(files, new_file)", "def moveprocessedfb2(self, input_folder_path, processed_folder_path, conn, logg):\n logg.writing_log(conn, 'Starting moving processed fb2 files')\n if os.listdir(input_folder_path):\n for file_name in os.listdir(input_folder_path):\n os.rename(os.path.join(input_folder_path, file_name), 
os.path.join(processed_folder_path, file_name))\n logg.writing_log(conn, 'All processed files are moved to processed folder')\n else:\n logg.writing_log(conn, 'The folder is empty, nothing to move')\n conn.commit()\n conn.close()", "def preprocess(input_folder, output_folder, T, skip, overwrite=False):\n original_labels = ['songID', 'time', 'A_t', 'A#_t', 'B_t', 'C_t', 'C#_t', 'D_t', 'D#_t', 'E_t', 'F_t', 'F#_t',\n 'G_t', 'G#_t']\n input_file_paths = sorted([os.path.join(input_folder, p) for p in os.listdir(input_folder) if p.startswith('chroma-nnls')])[-10:-9]\n print(input_file_paths)\n # input_file_paths = _create_file_paths(input_folder)\n for f in input_file_paths:\n logging.info(\"Working on file {}\".format(f))\n data = pd.read_csv(f, header=None, names=original_labels)\n data['songID'] = data['songID'].apply(_take_id) # take just the ID of the song\n data['songID'] = data['songID'].fillna(method='ffill') # repeat the ID for all rows\n for s in set(data['songID']):\n path_output = os.path.join(output_folder, 'chroma-nnls_' + s + '.csv')\n if not overwrite and os.path.isfile(path_output):\n logging.info(\"Output file {} already exists. Skipping songID {}\".format(path_output, s))\n continue\n logging.info(\"Working on songID {}\".format(s))\n df = data.loc[data['songID'] == s] # select one song at a time not to use too much memory\n df = _create_datapoints_for_dnn(df, T, skip) # add the desired columns\n df.to_csv(path_output, header=False, index=False) # write the df in a file\n return", "def test_input_folders_files(self):\n files = list_files_folder(data_dir + \"build-custom/files/\", ext=\"fna.gz\")\n folder = data_dir + \"build-custom/files/more/\"\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_input_folders_files\"\n params[\"input\"] = files + [folder]\n params[\"input_extension\"] = \"fna.gz\"\n cfg = Config(\"build-custom\", **params)\n self.assertTrue(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom run failed\")\n res = build_sanity_check_and_parse(vars(cfg))\n self.assertIsNotNone(res, \"ganon build-custom sanity check failed\")\n\n files.extend(list_files_folder(folder, ext=params[\"input_extension\"]))\n self.assertTrue(res[\"target\"][\"file\"].isin(files).all(), \"Files missing from target\")\n self.assertEqual(len(files), res[\"target\"].shape[0], \"Wrong number of files on target\")\n self.assertTrue(res[\"info\"][\"file\"].isin(files).all(), \"Files missing from info\")\n self.assertEqual(len(files), res[\"info\"].shape[0], \"Wrong number of files on info\")", "def multiple(folder_name: str,\r\n min_plant_pixels: int = MIN_PLANT_SIZE,\r\n output_options = [['rows',\r\n 'centers',\r\n 'row_ids',\r\n 'distances'],\r\n \r\n ['rows',\r\n 'centers',\r\n 'row_ids',\r\n 'numbers'],\r\n \r\n ['dirt',\r\n 'ditches',\r\n 'rows',\r\n 'clusters',\r\n 'centers',\r\n 'row_ids',\r\n 'numbers',\r\n 'lines']\r\n ]) -> None:\r\n\r\n # Go to the specified folder\r\n ls = listdir(folder_name)\r\n ls = [join(folder_name, i) for i in ls]\r\n\r\n # Check if the folder exists\r\n if join(folder_name, 'Analysis') in ls:\r\n\r\n # If it does, rename the old folder\r\n new_name = join(folder_name, 'Analysis')\r\n while new_name in ls:\r\n new_name += '_old'\r\n \r\n rename(join(folder_name,'Analysis'), new_name)\r\n\r\n # Create new folders inside the given directory\r\n mkdir(join(folder_name, 'Analysis'))\r\n mkdir(join(folder_name, 'Analysis/Images'))\r\n mkdir(join(folder_name, 'Analysis/Data'))\r\n \r\n # Gather the images to be 
analysed\r\n co = 0\r\n pics = [j for j in ls if isfile(j)]\r\n le = len(pics)\r\n\r\n # Analyze each of the pictures\r\n for i in pics:\r\n\r\n # Make the field\r\n field = just_field(i, min_plant_pixels)\r\n\r\n # Measure the field and save results\r\n print('Saving data...\\n')\r\n ruler = Ruler(field)\r\n \r\n ruler.output_distances(\r\n join(folder_name,\r\n 'Analysis/Data/{}_Distances.csv'.format(basename(i).split('.')[0])\r\n ) \r\n )\r\n \r\n ruler.output_row_info(\r\n join(folder_name,\r\n 'Analysis/Data/{}_Rows.csv'.format(basename(i).split('.')[0])\r\n )\r\n )\r\n\r\n # Make and save visuals\r\n print('Saving pictures...\\n')\r\n for k in range(len(output_options)):\r\n output_options[k]\r\n img = field.make_visual(ruler, output_options[k])\r\n img.save(\r\n join(folder_name,\r\n 'Analysis/Images/{}_Visual_{}.png'.format(basename(i).split('.')[0], k + 1)))\r\n\r\n # Increment the progress meter\r\n co += 1\r\n print('Completed {}/{} images\\n\\n'.format(co, le))", "def split_per(folderin, folderout, split_col='ECO_ID', colNms=['i_h100','i_cd',\n 'doy','i_wflen','i_acqdate','b1','vcf','ECO_NAME','ECO_ID','BIOME','geometry']):\n\n split_files = glob.glob(folderin + '*.shp')\n\n for filename in split_files:\n print(filename)\n basename = os.path.splitext(os.path.basename(filename))[0]\n dfa = gpd.read_file(filename)\n df = dfa.astype({split_col: 'int32'}) \n ecoNames = list(np.unique(df[split_col]))#get list of unique ecoregions \n \n for eco in ecoNames:\n #create new df with just columns I want\n df2 = gpd.GeoDataFrame(df, columns=colNms)\n ID = str(eco)\n df_eco = df.loc[df2[split_col]==eco, colNms]\n df_eco.to_file(folderout + '/{}_eco_{}.shp'.format(basename, ID))", "def jarvis(input_path, output_path): \n \n if not os.path.exists(f'{output_path}'):\n os.makedirs(f'{output_path}')\n\n folder_list = [sample for sample in os.listdir(input_path) if os.path.isdir(f'{input_path}{sample}')]\n\n for folder in folder_list:\n\n file_list = [filename for filename in os.listdir(f'{input_path}{folder}/') if '.tif' in filename]\n mutant = '_'.join(folder.split(' '))\n\n for x, filename in enumerate(file_list):\n pathname = os.path.join(input_path, folder, filename)\n new_name = f'{output_path}{mutant}_{x}.tif'\n copyfile(pathname, new_name)\n # array_stack = skimage.io.imread(f'{pathname}').transpose(1, 2, 0)\n logger.info(f'{new_name}')", "def move_to_folder(folder = \"output\"):\n for files in os.listdir(os.getcwd()):\n if files.endswith(\".tcl\") or files.endswith(\".pdb\") or files.endswith(\".fasta\") or files.endswith(\".tpl\"):\n new_file = folder + \"/\" + files\n os.rename(files, new_file)", "def clean_folder(self):\n # Remove the 1st output\n # Remove the 2nd output\n # Remove the calibrated output\n try:\n os.remove(\"output1.csv\")\n except:\n pass\n try: \n os.remove(\"output2.csv\")\n except:\n pass\n try:\n os.remove(self.__add_output_file_location(self._output_filename))\n except:\n pass\n \n list = os.listdir(\"edited\")\n for file in list:\n file = os.path.join(\"edited\", file)\n try:\n os.remove(file)\n except:\n pass\n \n list = os.listdir(\"extracted\")\n for file in list:\n file = os.path.join(\"extracted\", file)\n try:\n os.remove(file)\n except:\n pass", "def move_file(source, destination):\n #source = client_variables.output_folder\n #destination = client_variables.client_folder\n copyfiles = os.listdir(source)\n ext = (\".xlsx\", \".csv\", \".pdf\", \".png\")\n for copyfile in copyfiles:\n if copyfile.endswith(ext):\n copyfile = source + \"/\" + copyfile\n print 
\"copying\", copyfile\n shutil.move(copyfile, destination)\n elif copyfile.startswith('GetTotalByYearReport'):\n copyfile = source + \"/\" + copyfile\n print \"copying\", copyfile\n shutil.move(copyfile, destination)", "def browse_input(self):\n path = getAFolder()\n if len(path) > 0:\n self.in_directory.setText(path)\n self.out_directory.setText(join(path, 'merged_results'))\n self.preprocessfolder()", "def process_data(output_folder):\n # select imgs\n img_folder = join(output_folder, 'img')\n select_img(output_folder, img_folder, 'HE-green')\n\n mask_folder = join(output_folder, 'mask')\n select_img(output_folder, mask_folder, '_EF5')" ]
[ "0.6164347", "0.5919336", "0.5901154", "0.58560616", "0.57583755", "0.5725686", "0.571907", "0.56493175", "0.55794436", "0.5573059", "0.55641127", "0.55598956", "0.5552147", "0.55351996", "0.5534053", "0.5514603", "0.55066884", "0.5490536", "0.54603547", "0.5438883", "0.5437651", "0.5428646", "0.5425792", "0.54197073", "0.5378494", "0.53693575", "0.53616387", "0.5328064", "0.53086877", "0.5304949" ]
0.76440537
0
Build or update a Ticker metrics using a Quotecast object. Only the metrics which can be converted to float are supported. But that should be enough to handle all the real use cases.
def build_ticker_from_quotecast(
    quotecast: Quotecast,
    references: Dict[int, List[str]] = None,
    ticker: Ticker = None,
) -> Ticker:
    if references is None:
        references = dict()

    if ticker is None:
        ticker = Ticker()

    # SETUP PRODUCTS & METRICS
    message_array = json.loads(quotecast.json_data)

    for message in message_array:
        if message["m"] == "un":
            reference = message["v"][0]
            value = message["v"][1]
            product, metric = references[reference]
            ticker.products[product].metrics[metric] = value
        elif message["m"] == "us":
            reference = message["v"][0]
            value = message["v"][1]
            product, metric = references[reference]

            if value[4] == "-":
                date = datetime.datetime.strptime(
                    value,
                    "%Y-%m-%d",
                )
                value = datetime.datetime.timestamp(date)
                ticker.products[product].metrics[metric] = value
            elif value[2] == ":":
                time = datetime.time.fromisoformat(value)
                value = time.hour * 3600 + time.minute * 60 + time.second
                ticker.products[product].metrics[metric] = value
            else:
                # NOT CONVERTIBLE TO FLOAT
                raise RuntimeWarning(
                    "Unsupported string metric : "
                    f"{metric} = {message}"
                )
        elif message["m"] == "a_req":
            references[message["v"][1]] = message["v"][0].rsplit(
                sep=".",
                maxsplit=1,
            )
        elif message["m"] == "a_rel":
            delete_list = []
            for reference in references:
                if ".".join(references[reference]) == message["v"][0]:
                    delete_list.append(reference)

            for reference in delete_list:
                del references[reference]
        elif message["m"] == "h":
            pass
        elif message["m"] == "ue":
            pass
        elif message["m"] == "d":
            raise AttributeError(f"Subscription rejected : {message}")
        else:
            raise AttributeError(f"Unknown metric : {message}")

    # SETUP PRODUCT LIST
    ticker.product_list.extend(ticker.products)

    # SETUP METADATA
    ticker.metadata.MergeFrom(quotecast.metadata)

    return ticker
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self):\n self.data.update()\n stats = self.data.stats\n ticker = self.data.ticker\n\n if self.type == \"exchangerate\":\n self._attr_state = ticker[self._currency].p15min\n self._attr_unit_of_measurement = self._currency\n elif self.type == \"trade_volume_btc\":\n self._attr_state = f\"{stats.trade_volume_btc:.1f}\"\n elif self.type == \"miners_revenue_usd\":\n self._attr_state = f\"{stats.miners_revenue_usd:.0f}\"\n elif self.type == \"btc_mined\":\n self._attr_state = str(stats.btc_mined * 0.00000001)\n elif self.type == \"trade_volume_usd\":\n self._attr_state = f\"{stats.trade_volume_usd:.1f}\"\n elif self.type == \"difficulty\":\n self._attr_state = f\"{stats.difficulty:.0f}\"\n elif self.type == \"minutes_between_blocks\":\n self._attr_state = f\"{stats.minutes_between_blocks:.2f}\"\n elif self.type == \"number_of_transactions\":\n self._attr_state = str(stats.number_of_transactions)\n elif self.type == \"hash_rate\":\n self._attr_state = f\"{stats.hash_rate * 0.000001:.1f}\"\n elif self.type == \"timestamp\":\n self._attr_state = stats.timestamp\n elif self.type == \"mined_blocks\":\n self._attr_state = str(stats.mined_blocks)\n elif self.type == \"blocks_size\":\n self._attr_state = f\"{stats.blocks_size:.1f}\"\n elif self.type == \"total_fees_btc\":\n self._attr_state = f\"{stats.total_fees_btc * 0.00000001:.2f}\"\n elif self.type == \"total_btc_sent\":\n self._attr_state = f\"{stats.total_btc_sent * 0.00000001:.2f}\"\n elif self.type == \"estimated_btc_sent\":\n self._attr_state = f\"{stats.estimated_btc_sent * 0.00000001:.2f}\"\n elif self.type == \"total_btc\":\n self._attr_state = f\"{stats.total_btc * 0.00000001:.2f}\"\n elif self.type == \"total_blocks\":\n self._attr_state = f\"{stats.total_blocks:.0f}\"\n elif self.type == \"next_retarget\":\n self._attr_state = f\"{stats.next_retarget:.2f}\"\n elif self.type == \"estimated_transaction_volume_usd\":\n self._attr_state = f\"{stats.estimated_transaction_volume_usd:.2f}\"\n elif self.type == \"miners_revenue_btc\":\n self._attr_state = f\"{stats.miners_revenue_btc * 0.00000001:.1f}\"\n elif self.type == \"market_price_usd\":\n self._attr_state = f\"{stats.market_price_usd:.2f}\"", "def set_metrics(self):", "def __init__(self):\n super(ASYMMETRIC, self).__init__(quant_type=Constants.QZ_ASYMMETRIC)", "def test_update_derived_metric(self):\n pass", "def compute_track_metrics(use_async=CELERY_ENABLED):\n\n # arbitrary field check to not update already loaded tracks\n for track in Track.objects.filter(duration__is_null=False):\n if use_async:\n async_set_metrics.delay(track)\n else:\n sleep(2)\n track.set_metrics()", "def map_to_ticker(self, raw_ticker: HitbtcRawTickerModel) -> HitbtcTickerModel:\n\n symbol = raw_ticker[\"symbol\"]\n low = Decimal(raw_ticker[\"low\"])\n high = Decimal(raw_ticker[\"high\"])\n volume = Decimal(raw_ticker[\"volume\"])\n volume_quote = Decimal(raw_ticker[\"volumeQuote\"])\n timestamp = raw_ticker[\"timestamp\"]\n raw_ask = raw_ticker[\"ask\"]\n ask = Decimal(raw_ask) if raw_ask is not None else raw_ask\n raw_bid = raw_ticker[\"bid\"]\n bid = Decimal(raw_bid) if raw_bid is not None else raw_bid\n raw_last = raw_ticker[\"last\"]\n last = Decimal(raw_last) if raw_last is not None else raw_last\n raw_open = raw_ticker[\"open\"]\n open_ = Decimal(raw_open) if raw_open is not None else raw_open\n\n ticker = HitbtcTickerModel(\n symbol=symbol,\n low=low,\n high=high,\n volume=volume,\n volume_quote=volume_quote,\n timestamp=timestamp,\n ask=ask,\n bid=bid,\n last=last,\n open=open_)\n\n return 
ticker", "def calculate_metrics_single_ticker_via_celery(tuple, tca_request, dummy_market):\n\n return tca_ticker_loader.calculate_metrics_single_ticker(tuple, tca_request, dummy_market)", "def to_metrics(self, res: Union[float, Dict[str, float]]) -> float:\n if isinstance(res, dict):\n return list(res.values())[0]\n if isinstance(res, (tuple, list)):\n return res[0]\n return res", "def metrics_builder(metrics_dict):\n df = pd.DataFrame(metrics_dict[\"combined_delay\"].mean(axis=0), columns=[\"Metrics\"])\n df.loc[\"Max_Actual_Delay\"] = metrics_dict[\"combined_delay\"][\"Actual_Delay\"].loc[metrics_dict[\"actual_max_index\"]]\n df.loc[\"Min_Actual_Delay\"] = metrics_dict[\"combined_delay\"][\"Actual_Delay\"].loc[metrics_dict[\"actual_min_index\"]]\n df.loc[\"Max_Predicted_Delay\"] = metrics_dict[\"combined_delay\"][\"Predicted_Delay\"].loc[\n metrics_dict[\"predicted_max_index\"]]\n df.loc[\"Min_Predicted_Delay\"] = metrics_dict[\"combined_delay\"][\"Predicted_Delay\"].loc[\n metrics_dict[\"predicted_min_index\"]]\n df.loc[\"Mean_Absolute_Error\"] = metrics_dict[\"MAE\"]\n df.loc[\"R2\"] = metrics_dict[\"R2\"]\n df.loc[\"Median_Absolute_Error\"] = metrics_dict[\"MEDAE\"]\n df.loc[\"Root_Mean_Squared_Error\"] = metrics_dict[\"RMSE\"]\n df.loc[\"Mean_Squared_Log_Error\"] = metrics_dict[\"MSLE\"]\n df = df.rename(index={\"Actual_Delay\": \"Actual_Delay_Mean\", \"Predicted_Delay\": \"Predicted_Delay_Mean\",\n \"Difference_In_Delay\": \"Difference_In_Delay_Mean\"})\n return df", "def _measurement_update(self):\n pass", "def create_metric(self) -> EvalMetric:\n pass", "async def update_trend_data(self, dt=None):\n for scale in valid_scales:\n await self.get_trend_data(scale, dt)", "def send_metrics(timestamp: Optional[float] = None) -> bool:\n\n def new_point(metric_name: str, result: float):\n series = monitoring_v3.types.TimeSeries()\n series.metric.type = f\"custom.googleapis.com/{metric_name}\"\n\n point = series.points.add()\n point.interval.end_time.seconds = now\n\n if isinstance(result, float):\n point.value.double_value = result\n else:\n point.value.int64_value = result\n return series\n\n now = int(time.time())\n prev_minute_tstamp = timestamp or (now - (now % 60) - 60)\n metrics_pattern = f\"{Monitoring.ACC_PREFIX}_{prev_minute_tstamp}_*\"\n monitoring_keys = redis_client.keys(metrics_pattern)\n all_series = []\n for metric_key in monitoring_keys:\n raw_value = redis_client.get(metric_key)\n values: List[str] = raw_value.split(\"|\") # type: ignore\n metric_name = values.pop(0) # metric name\n op = values.pop(0) # operation - SUM or AVG\n typ = values.pop(0) # INT or FLOAT\n if typ == \"INT\":\n result = sum(map(int, values))\n if op == \"AVG\":\n result = result // len(values)\n else:\n result = sum(map(float, values)) # type: ignore\n if op == \"AVG\":\n result = result / len(values) # type: ignore\n\n all_series.append(new_point(metric_name, result))\n if op == \"AVG\": # create count for AVG metric too\n all_series.append(new_point(f\"{metric_name}_COUNT\", len(values)))\n\n try:\n monitor_client.create_time_series(project_path, all_series)\n except InvalidArgument:\n logging.exception(\"mark_point failed\")\n return False\n else:\n return True", "def conform_input_data(rowdict):\n # rowdict['Value'] = float(rowdict['Value'])\n rowdict['TimeStamp'] = TS_to_date(rowdict['TimeStamp'][:19])\n for floatcolumn in ['LowPx','OpenPx','ClosePx','QuoteCount','HighPx','TradeCount']:\n if floatcolumn in rowdict:\n rowdict[floatcolumn] = float(rowdict[floatcolumn])\n return rowdict", "def 
_calculate_custom_data(self):\n if self.limit is not None:\n self.data['pct'] = self.usage * 100.0 / self.limit\n if self.units == 'hours':\n self.time = timedelta(hours=self.usage)\n self.data['name'] = self.id", "def update_tick(self, tick: TickData):\n new_minute = False\n self.last_price = tick.last_price\n self.open_interest = tick.open_interest\n self.volume = tick.volume\n\n # 更新均价线\n self.molecule = self.molecule + tick.last_price * tick.volume\n self.denominator = self.denominator + tick.volume\n try:\n self.average_price = self.molecule / self.denominator\n except ZeroDivisionError:\n self.average_price = tick.last_price\n\n if self.last_volume is None:\n self.last_volume = tick.volume\n if self.local_symbol is None:\n self.local_symbol = tick.local_symbol\n if not self.bar:\n new_minute = True\n elif self.bar.datetime.minute != tick.datetime.minute:\n self.bar.datetime = self.bar.datetime.replace(\n second=0, microsecond=0\n )\n self.bar.interval = 1\n event = Event(type=EVENT_BAR, data=self.bar)\n self.rpo.put(event)\n [self.update_bar(x, getattr(self, \"min_{}_bar\".format(x)), self.bar) for x in self.XMIN]\n new_minute = True\n if new_minute:\n if self.app.config.get(\"SHARED_FUNC\"):\n shared = SharedData(last_price=round(self.last_price, 2), datetime=tick.datetime,\n local_symbol=self.local_symbol,\n open_interest=self.open_interest, average_price=round(self.average_price, 2),\n volume=self.volume - self.last_volume, gateway_name=tick.gateway_name)\n event = Event(type=EVENT_SHARED, data=shared)\n self.rpo.put(event)\n self.last_volume = tick.volume\n\n self.bar = BarData(\n symbol=tick.symbol,\n exchange=tick.exchange,\n datetime=tick.datetime,\n gateway_name=tick.gateway_name,\n open_price=tick.last_price,\n high_price=tick.last_price,\n low_price=tick.last_price,\n close_price=tick.last_price,\n )\n else:\n self.bar.high_price = max(self.bar.high_price, tick.last_price)\n self.bar.low_price = min(self.bar.low_price, tick.last_price)\n self.bar.close_price = tick.last_price\n self.bar.datetime = tick.datetime\n\n if self.last_tick:\n volume_change = tick.volume - self.last_tick.volume\n self.bar.volume += max(volume_change, 0)\n self.last_tick = tick", "def _get_measurements_with_derived_metrics(self, measurements):\n\n now = time.time()\n\n def metrics_available(*names):\n return all(name in self._event_names and name in measurements\n and name in self._prev_measurements for name in names)\n\n def delta(*names):\n return [measurements[name] - self._prev_measurements[name] for name in names]\n\n # if specific pairs are available calculate derived metrics\n if self._prev_measurements is not None:\n time_delta = now - self._prev_ts\n\n if metrics_available(MetricName.INSTRUCTIONS, MetricName.CYCLES):\n inst_delta, cycles_delta = delta(MetricName.INSTRUCTIONS,\n MetricName.CYCLES)\n if cycles_delta > 0:\n measurements[DerivedMetricName.IPC] = float(inst_delta) / cycles_delta\n\n if time_delta > 0:\n measurements[DerivedMetricName.IPS] = float(inst_delta) / time_delta\n\n if metrics_available(MetricName.INSTRUCTIONS, MetricName.CACHE_MISSES):\n inst_delta, cache_misses_delta = delta(MetricName.INSTRUCTIONS,\n MetricName.CACHE_MISSES)\n if inst_delta > 0:\n measurements[DerivedMetricName.CACHE_MISSES_PER_KILO_INSTRUCTIONS] = \\\n float(cache_misses_delta) * 1000 / inst_delta\n\n if metrics_available(MetricName.CACHE_REFERENCES, MetricName.CACHE_MISSES):\n cache_ref_delta, cache_misses_delta = delta(MetricName.CACHE_REFERENCES,\n MetricName.CACHE_MISSES)\n if cache_ref_delta > 
0:\n cache_hits_count = cache_ref_delta - cache_misses_delta\n measurements[DerivedMetricName.CACHE_HIT_RATIO] = (\n float(cache_hits_count) / cache_ref_delta)\n\n self._prev_measurements = measurements\n self._prev_ts = now\n\n return measurements", "def _bs_data_transform(self, qtls, qtlhdrs):\n data0 = self._data_transform()\n data0 = data0[data0[\"origin\"] != \"total\"]\n data1 = self._get_quantiles_by_devp(qtls, qtlhdrs)\n data1 = data1[data1[\"origin\"] != \"total\"]\n data = data0.merge(data1, on=[\"origin\", \"dev\"], how=\"left\")\n\n # Remove qtlhdrs values where rectype==\"actual\".\n for qtlhdr in qtlhdrs:\n data[qtlhdr] = np.where(\n data[\"rectype\"].values == \"actual\", np.NaN, data[qtlhdr].values\n )\n\n # Determine the first forecast period by origin, and set q-fields to actuals.\n increment = np.unique(self.ldfs.index[1:] - self.ldfs.index[:-1])[0]\n data[\"_ff\"] = np.where(\n data[\"rectype\"].values == \"forecast\",\n data[\"dev\"].values, data[\"dev\"].values.max() + increment\n )\n data[\"_minf\"] = data.groupby([\"origin\"])[\"_ff\"].transform(\"min\")\n for hdr in qtlhdrs:\n data[hdr] = np.where(\n np.logical_and(\n data[\"rectype\"].values == \"forecast\",\n data[\"_minf\"].values == data[\"dev\"].values\n ), data[\"loss\"].values, data[hdr].values\n )\n\n data = data.drop([\"_ff\", \"_minf\"], axis=1).reset_index(drop=True)\n dfv = data[[\"origin\", \"dev\", \"rectype\", \"loss\"]]\n dfl = data[[\"origin\", \"dev\", \"rectype\", qtlhdrs[0]]]\n dfu = data[[\"origin\", \"dev\", \"rectype\", qtlhdrs[-1]]]\n dfl[\"rectype\"] = qtlhdrs[0]\n dfl = dfl.rename({qtlhdrs[0]: \"loss\"}, axis=1)\n dfu[\"rectype\"] = qtlhdrs[-1]\n dfu = dfu.rename({qtlhdrs[-1]: \"loss\"}, axis=1)\n return(pd.concat([dfv, dfl, dfu]).sort_index().reset_index(drop=True))", "def set_offensive_ratio(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n if bx[\"minutes\"] > 0 and (bx[\"t2p_int\"] + bx[\"t3p_int\"]) > 0:\n fgm = bx[\"t2p_conv\"] + bx[\"t3p_conv\"]\n fga = bx[\"t2p_int\"] + bx[\"t3p_int\"]\n team_fgm = team[\"t2p_conv\"] + team[\"t3p_conv\"]\n team_fga = team[\"t2p_int\"] + team[\"t3p_int\"]\n team_points = team[\"t2p_conv\"]*2 + team[\"t3p_conv\"]*3 + team[\"tl_conv\"]\n points = bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"]\n\n try:\n qAST = (Decimal(bx[\"minutes\"] / (team[\"minutes\"] / 5)) * (Decimal('1.14') * Decimal((team[\"assists\"] - bx[\"assists\"]) / team_fgm))) + \\\n Decimal((((team[\"assists\"] / team[\"minutes\"]) * bx[\"minutes\"] * 5 - bx[\"assists\"]) / ((team_fgm / team[\"minutes\"]) * bx[\"minutes\"] * 5 - fgm)) * (1 - (bx[\"minutes\"] / (team[\"minutes\"] / 5))))\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n qAST = 1\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n qAST = 1\n\n fg_part = fgm * (1 - Decimal('0.5') * Decimal((points - bx[\"tl_conv\"]) / (2 * fga)) * qAST)\n\n try:\n ast_part = Decimal('0.5') * Decimal(((team_points - team[\"tl_conv\"]) - (points - bx[\"tl_conv\"])) / (2*(team_fga - fga))) * bx[\"assists\"]\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n ast_part = 0\n\n if bx[\"tl_int\"] > 0:\n ft_part = Decimal(1 - (1 - (bx[\"tl_conv\"] / bx[\"tl_int\"]))**2) * Decimal('0.4') * bx[\"tl_int\"]\n else:\n ft_part = 0\n team_scoring_poss = Decimal(team_fgm + Decimal(1 - (1 - (team[\"tl_conv\"] / team[\"tl_int\"]))**2) * 
team[\"tl_int\"] * Decimal('0.4'))\n try:\n team_orb_percentage = Decimal(team[\"reb_of\"] / (team[\"reb_of\"] + ((opp_team[\"reb_def\"] + opp_team[\"reb_of\"]) - opp_team[\"reb_of\"])))\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n team_orb_percentage = 0\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n team_orb_percentage = 0\n\n team_play_percentage = Decimal(team_scoring_poss / (team_fga + team[\"tl_int\"] * Decimal('0.4') + team[\"turnovers\"]))\n try:\n team_orb_weight = ((1 - team_orb_percentage) * team_play_percentage) / ((1 - team_orb_percentage) * team_play_percentage + team_orb_percentage * (1 - team_play_percentage))\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n team_orb_weight = 0\n\n orb_part = bx[\"reb_of\"] * team_orb_weight * team_play_percentage\n\n fg_x_poss = (fga - fgm) * (1 - Decimal('1.07') * team_orb_percentage)\n if bx[\"tl_conv\"] > 0:\n ft_x_poss = Decimal((1 - (bx[\"tl_conv\"] / bx[\"tl_int\"]))**2) * Decimal('0.4') * bx[\"tl_int\"]\n else:\n ft_x_poss = Decimal(1 - (bx[\"tl_conv\"] / 1)**2) * Decimal('0.4') * bx[\"tl_int\"]\n try:\n sc_poss = (fg_part + ast_part + ft_part) * (1 - (team[\"reb_of\"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + orb_part\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n sc_poss =0\n\n tot_poss = sc_poss + fg_x_poss + ft_x_poss + bx[\"turnovers\"]\n\n pprod_fg_part = 2 * (fgm + Decimal('0.5') * bx[\"t3p_conv\"]) * (1 - Decimal('0.5') * Decimal((points - bx[\"tl_conv\"]) / (2 * fga)) * qAST)\n\n try:\n pprod_ast_part = 2 * ((team_fgm - fgm + Decimal('0.5') * (team[\"t3p_conv\"] - bx[\"t3p_conv\"])) / (team_fgm - fgm)) * Decimal('0.5') * Decimal(((team_points - team[\"tl_conv\"]) - (points - bx[\"tl_conv\"])) / (2 * (team_fga - fga))) * bx[\"assists\"]\n except:\n pprod_ast_part = 0\n\n pprod_orb_part = bx[\"reb_of\"] * team_orb_weight * team_play_percentage * (team_points / (team_fgm + Decimal(1 - (team[\"tl_conv\"] / team[\"tl_int\"])**2) * Decimal('0.4') * team[\"tl_int\"]))\n try:\n pprod = (pprod_fg_part + pprod_ast_part + bx[\"tl_conv\"]) * (1 - (team[\"reb_of\"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + pprod_orb_part\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n pprod = 0\n\n try:\n result = 100 * (pprod / tot_poss)\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n result = 0\n\n # print(\"fgm: \" + str(fgm))\n # print(\"fga: \" + str(fga))\n # print(\"team_fgm: \" + str(team_fgm))\n # print(\"team_fga: \" + str(team_fga))\n # print(\"team_points: \" + str(team_points))\n # print(\"points: \" + str(points))\n # print(\"qAST: \" + str(qAST))\n # print(\"fg_part: \" + str(fg_part))\n # print(\"ast_part: \" + str(ast_part))\n # print(\"ft_part: \" + str(ft_part))\n # print(\"team_scoring_poss: \" + str(team_scoring_poss))\n # print(\"team_orb_percentage: \" + str(team_orb_percentage))\n # print(\"team_play_percentage: \" + str(team_play_percentage))\n # print(\"team_orb_weight: \" + str(team_orb_weight))\n # print(\"orb_part: \" + str(orb_part))\n # print(\"fg_x_poss: \" + str(fg_x_poss))\n # print(\"ft_x_poss: \" + str(ft_x_poss))\n # print(\"sc_poss: \" + str(sc_poss))\n # print(\"tot_poss: \" + str(tot_poss))\n # print(\"pprod_fg_part: \" + str(pprod_fg_part))\n # print(\"pprod_ast_part: \" + 
str(pprod_ast_part))\n # print(\"pprod_orb_part: \" + str(pprod_orb_part))\n # print(\"pprod: \" + str(pprod))\n # print(\"result: \" + str(result) + \"\\n\")\n else:\n result = 0.00\n\n self.ortg = \"%.2f\" % round(result, 2)\n if Decimal(self.ortg) < 0 or Decimal(self.ortg) >= 1000:\n \"\"\"For one game, maybe we've got a negative result or one so big, so, for just only a game, we get the ORTG \n using team's formula\"\"\"\n print(BCOLORS.OKBLUE + \"ORTG negativo o superior a 1000 para jugadora => recalculamos a través de la fórmula de equipo\" + BCOLORS.ENDC)\n bx = self.get_standard_stats()\n result = round((bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"])/self.get_team_possessions(), 2)\n self.ortg = \"%.2f\" % result", "def update_tick(self, tick: TickData):\n new_minute = False\n\n # Filter tick data with 0 last price\n if not tick.last_price:\n return\n\n if not self.bar:\n new_minute = True\n elif self.bar.datetime.minute != tick.datetime.minute:\n self.bar.datetime = self.bar.datetime.replace(\n second=0, microsecond=0\n )\n self.bar.datetime = self.local_to_timezone(self.bar.datetime)\n self.on_bar(self.bar)\n\n new_minute = True\n\n if new_minute:\n self.bar = BarData(\n symbol=tick.symbol,\n exchange=tick.exchange,\n interval=Interval.MINUTE,\n datetime=tick.datetime,\n gateway_name=tick.gateway_name,\n open_price=tick.last_price,\n high_price=tick.last_price,\n low_price=tick.last_price,\n close_price=tick.last_price,\n open_interest=tick.open_interest\n )\n else:\n self.bar.high_price = max(self.bar.high_price, tick.last_price)\n self.bar.low_price = min(self.bar.low_price, tick.last_price)\n self.bar.close_price = tick.last_price\n self.bar.open_interest = tick.open_interest\n self.bar.datetime = tick.datetime\n\n if self.last_tick:\n volume_change = tick.volume - self.last_tick.volume\n self.bar.volume += max(volume_change, 0)\n\n self.last_tick = tick", "def _update_from_data(self, data):\n try:\n self.channelId = data[\"channelId\"]\n except (KeyError, TypeError):\n raise ValueError(\"Foretold data missing or invalid\")\n\n # If floatCdf is not available, we can just keep it as None\n try:\n self.floatCdf = data[\"previousAggregate\"][\"value\"][\"floatCdf\"]\n except (KeyError, TypeError):\n self.floatCdf = None", "def Update(self):\n print(f\"Updating {self.name} from yfinance API...\")\n import yfinance as yf\n import datetime\n stock = yf.Ticker(self._symbol)\n if (self.name == None or self.name == self.symbol) and stock.info is not None:\n if \"shortName\" in stock.info:\n self.name = stock.info['shortName']\n yhistory = stock.history(period=\"max\")\n print(yhistory)\n\n dividends = []\n for date, row in yhistory.iterrows():\n dividend_today = row['Dividends']\n dividends.append((date, dividend_today))\n if dividend_today != 0.:\n while date - dividends[0][0] > datetime.timedelta(days=360):\n dividends.remove(dividends[0])\n else:\n while date - dividends[0][0] > datetime.timedelta(days=370):\n dividends.remove(dividends[0])\n\n annualDividend = 0.\n for dividend in dividends:\n annualDividend += dividend[1]\n \n self.AddSnapshot(price=row['Open'], date=date, dividend=dividend_today, annualDividend=annualDividend)\n #self.AddSnapshot(price=row['Close'], date=date, annualDividend=annualDividend)\n\n try:\n self.short_percent_of_float = stock.info['shortPercentOfFloat']\n except(KeyError):\n self.short_percent_of_float = 0.\n try:\n self.pe_ratio = stock.info['forwardPE']\n except(KeyError, TypeError):\n self.pe_ratio = float('inf')\n\n print(f\"History for 
{self.name} updated.\")", "def _build_target_quantile_values_op(self):\n batch_size = tf.shape(self._replay.rewards)[0]\n ###### Munchausen-specific\n replay_action_one_hot = tf.one_hot(\n self._replay.actions, self.num_actions, 1., 0., name='action_one_hot')\n # tau * ln pi_k+1 (s')\n replay_next_log_policy = utils.stable_scaled_log_softmax(\n self._replay_next_target_q_values, self.tau, axis=1)\n # tau * ln pi_k+1(s)\n replay_log_policy = utils.stable_scaled_log_softmax(\n self._replay_target_q_values, self.tau, axis=1)\n replay_next_policy = utils.stable_softmax( # pi_k+1(s')\n self._replay_next_target_q_values, self.tau, axis=1)\n\n tau_log_pi_a = tf.reduce_sum( # ln pi_k+1(a|s)\n replay_log_policy * replay_action_one_hot, axis=1)\n\n tau_log_pi_a = tf.clip_by_value(\n tau_log_pi_a, clip_value_min=self.clip_value_min, clip_value_max=0)\n\n munchuasen_term = self.alpha * tau_log_pi_a\n #########\n\n # Shape of rewards: (num_tau_prime_samples x batch_size) x 1.\n rewards = self._replay.rewards[:, None] + munchuasen_term[Ellipsis, None]\n rewards = tf.tile(rewards, [self.num_tau_prime_samples, 1])\n\n is_terminal_multiplier = 1. - tf.cast(self._replay.terminals, tf.float32)\n # Incorporate terminal state to discount factor.\n # size of gamma_with_terminal: (num_tau_prime_samples x batch_size) x 1.\n gamma_with_terminal = self.cumulative_gamma * is_terminal_multiplier\n gamma_with_terminal = tf.tile(gamma_with_terminal[:, None],\n [self.num_tau_prime_samples, 1])\n\n # shape: (batch_size * num_tau_prime_samples) x num_actions\n replay_next_policy_ = tf.tile(replay_next_policy,\n [self.num_tau_prime_samples, 1])\n replay_next_log_policy_ = tf.tile(replay_next_log_policy,\n [self.num_tau_prime_samples, 1])\n\n # shape: (batch_size * num_tau_prime_samples) x 1\n replay_quantile_values = tf.reshape(\n self._replay_net_target_quantile_values,\n [batch_size * self.num_tau_prime_samples, self.num_actions])\n\n # shape: (batch_size * num_tau_prime_samples) x num_actions\n weighted_logits = (\n replay_next_policy_ * (replay_quantile_values\n - replay_next_log_policy_))\n\n # shape: (batch_size * num_tau_prime_samples) x 1\n target_quantile_values = tf.reduce_sum(weighted_logits, axis=1,\n keepdims=True)\n\n return rewards + gamma_with_terminal * target_quantile_values", "def convert(self, data, *args, **kwargs):\n\n # all of this is still quite ugly and verrrry specific...\n json_data = {}\n for hit in data[\"hits\"][\"hits\"]:\n # pprint(hit)\n\n # get the PQ\n pq = hit.get(\"_source\", {}).get(\"metadata\", {}).get(\"PanDAQueue\", None)\n if not pq:\n continue\n\n # get the list of all benchmark results\n latest_list = (\n hit.get(\"inner_hits\", {})\n .get(\"most_recent\", {})\n .get(\"hits\", {})\n .get(\"hits\", [])\n )\n if len(latest_list) == 0:\n continue\n\n # get the average of the latest benchmark results.\n # Only results not older than 7d, and a maximum of 50 results (whichever value is hit first).\n # If we have no values more recent than 7d, simply use the last available one (that PQ is probably not online anymore anyway)\n values = []\n for d in latest_list:\n date = datetime.datetime.strptime(\n d.get(\"_source\", {}).get(\"timestamp\", \"\"), \"%Y-%m-%dT%H:%M:%SZ\"\n )\n two_days_ago = datetime.datetime.now() - datetime.timedelta(days=2)\n seven_days_ago = datetime.datetime.now() - datetime.timedelta(days=7)\n\n if date > two_days_ago:\n # we are within the last two days, so we take all the measurements we can get!\n values.append(d)\n elif (date < two_days_ago) and (date > 
seven_days_ago):\n # we are between 2 and 7 days ago, so take only values if we don't have 25 values already\n if len(values) < 30:\n values.append(d)\n elif date < seven_days_ago:\n # we are further away than 7 days, so take a maximum of 5 values from here if we don't have 5 yet\n if len(values) < 10:\n values.append(d)\n\n to_average = [\n i.get(\"_source\", {})\n .get(\"profiles\", {})\n .get(\"fastBmk\", {})\n .get(\"value\", 0.0)\n for i in values\n ]\n json_data[pq] = {\n \"avg_value\": float(sum(to_average)) / len(to_average),\n \"measurements\": len(to_average),\n }\n # print(len(to_average))\n\n return json_data", "def update_temperature_values(self):\n year = self._current_date.year\n month = self._current_date.month\n\n self.ensure_temperatures(dt.date(year, month, 15))\n self.set_temperature_arrays(dt.date(year, month, 15))", "def test_tag_rates_on_duplicate_metric_per_cost_type(self):\n tag_values_kwargs = [{\"value\": 0.2}]\n cost_model = {\n \"name\": \"Test Cost Model\",\n \"description\": \"Test\",\n \"source_type\": Provider.PROVIDER_OCP,\n \"providers\": [{\"uuid\": self.provider.uuid, \"name\": self.provider.name}],\n \"markup\": {\"value\": 10, \"unit\": \"percent\"},\n \"rates\": [\n {\"metric\": {\"name\": metric_constants.OCP_METRIC_CPU_CORE_USAGE_HOUR}},\n {\"metric\": {\"name\": metric_constants.OCP_METRIC_CPU_CORE_USAGE_HOUR}},\n ],\n \"currency\": \"USD\",\n }\n cost_model[\"rates\"][0][\"tag_rates\"] = format_tag_rate(tag_key=\"k1\", tag_values=tag_values_kwargs)\n cost_model[\"rates\"][1][\"tag_rates\"] = format_tag_rate(tag_key=\"k2\", tag_values=tag_values_kwargs)\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=cost_model, context=self.request_context)\n self.assertTrue(serializer.is_valid(raise_exception=True))\n serializer.save()\n serializer.data", "def build_ticker_sample(\n number: int = 10,\n metric_list: List[str] = [\"l1\", \"l2\", \"l3\"],\n):\n\n ticker = Ticker()\n\n # SETUP METADATA\n ticker.metadata.response_datetime.GetCurrentTime()\n ticker.metadata.request_duration.FromNanoseconds(random.randrange(5 * 10 ** 9))\n\n # SETUP EXTRA-DATA\n for i in range(number):\n for metric in metric_list:\n ticker.products[i].metrics[metric] = random.uniform(0.0, 100.0)\n\n return ticker", "def get_quote(self, ticker):\r\n key = 'GLC0GTVKR51SY1V'\r\n quote_url = 'https://www.alphavantage.co/query?function=GLOBAL_QUOTE&symbol=' + ticker.upper() + '&apikey=' + key\r\n key_metrics_url = 'https://www.alphavantage.co/query?function=OVERVIEW&symbol=' + ticker.upper() + '&apikey=' + key\r\n\r\n quote_response = requests.get(quote_url)\r\n string = quote_response.json()\r\n\r\n key_metrics_response= requests.get(key_metrics_url)\r\n metrics_str = key_metrics_response.json()\r\n color_tag = None\r\n\r\n if quote_response and 'Global Quote' in string:\r\n\r\n current_price = round(float(string['Global Quote']['05. price']), 2)\r\n change = round(float(string['Global Quote']['09. change']), 2)\r\n change_pct = string['Global Quote']['10. change percent'][:5] + \"%\"\r\n previous_price = round(float(string['Global Quote']['08. 
previous close']), 2)\r\n\r\n yearly_high = metrics_str['52WeekHigh']\r\n mark_cap = round(int(metrics_str['MarketCapitalization'])/10E8, 2)\r\n mark_cap_str = str(mark_cap) + \"B\"\r\n\r\n if ticker not in self.holdings:\r\n self.holdings[ticker] = current_price\r\n tuples = [ticker, current_price, change, change_pct, yearly_high, mark_cap_str]\r\n\r\n if current_price > previous_price:\r\n color_tag = 'green'\r\n else:\r\n color_tag = 'red'\r\n self.treeview.insert(parent='', index='end', values=tuples, tags=(color_tag,))\r\n return current_price\r\n else:\r\n return None", "def upload_metrics(metrics_dict, project, dataset, table):\n # Credentials will be loaded from envvar $GOOGLE_APPLICATION_CREDENTIALS.\n bq_client = bigquery.Client(project=project)\n table_ref = bq_client.dataset(dataset).table(table)\n errors = bq_client.insert_rows_json(table_ref, metrics_dict)\n return errors", "def update(self):\n\n self.stats = statistics.get()\n self.ticker = exchangerates.get_ticker()" ]
[ "0.504665", "0.49457982", "0.482276", "0.47894225", "0.47741964", "0.47666577", "0.4716032", "0.46845242", "0.46796387", "0.46367455", "0.46182653", "0.4578131", "0.4567336", "0.4567215", "0.45670658", "0.4566002", "0.4552697", "0.45424986", "0.45123693", "0.44995657", "0.44877443", "0.44838825", "0.44828242", "0.44786343", "0.44393143", "0.4434736", "0.4430184", "0.44293934", "0.44234604", "0.44196582" ]
0.6156283
0
Rebuild the request from history (self.__references).
def rebuild_request(self) -> Quotecast.Request:
    references = self.references
    request = Quotecast.Request()

    for vwd_id, metric in references.values():
        request.subscriptions[vwd_id].append(metric)

    return request
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rebuild(self):\n _logger.info( \"Rebuilding the API Caches...\" )\n\n # fill out the data structures\n self._buildApiTypesList()\n #_buildMayaTypesList()\n \n self._buildMayaReservedTypes(force=True)\n\n self._buildApiRelationships()\n\n # merge in the manual overrides: we only do this when we're rebuilding or in the pymelControlPanel\n _logger.info( 'merging in dictionary of manual api overrides')\n self._mergeClassOverrides()", "def _problem_update_history(self, _):\n self._update_reward_values()\n self.history.curr_reward.append(self.curr_reward)\n self.history.curr_best_reward.append(self.curr_best_reward)", "def _rebuild(self, *args, **kwargs):\n handle = self._args.copy() # Original constructor arguments\n argnames = [i for i in self._traversable if i not in kwargs]\n handle.update(OrderedDict([(k, v) for k, v in zip(argnames, args)]))\n handle.update(kwargs)\n return type(self)(**handle)", "def _clone_rip(self, memo):\n # references lists of definitions need to be vacated except those that were cloned.\n for definition in self._definitions:\n new_references = set()\n for ref in definition._references:\n if ref in memo.values():\n new_references.add(ref)\n for instance in definition._children:\n instance._reference._references.add(instance)\n\n definition._references = new_references", "def history(self, history):\n self._history = history", "def reset(self):\n self._current_request = {}\n return self", "def reset(self):\n raise NotImplemented('Do not call WithHistory directly')", "def history(self, history):\n\n self._history = history", "def rebuild(context):\n clean(context)\n build(context, cache=False)", "def refresh(self):\n self.dto = self.res.get()\n log.debug(f\"Refreshed {self.url}\")", "def _push_history(self):\n self._history.append(self._state)", "def rebuild(self):\n self.from_samples(self.samples)", "def change_history(self, new_reflist, modification_msg):\n self.visual.log(\"New reference list wrt: [{}], yielded {} items.\".format(modification_msg, len(new_reflist)))\n self.push_reference_list(new_reflist, modification_msg)\n # unselect stuff -- it's meaningless now\n self.unselect()", "def _update_head_history(self):\n # pylint: disable=broad-except\n try:\n head = [h for h in self._git.heads if h.name == self.head][0]\n self.head_hash = head.commit.hexsha\n self.head_history = [\n {\n \"commit\": str(c.newhexsha),\n \"timestamp\": c.time[0],\n \"message\": c.message,\n \"author\": {\"name\": c.actor.name, \"email\": c.actor.email},\n }\n for c in head.log()[::-1]\n ]\n except Exception as err:\n self.log.warn(\"Git head update error, ignoring: %s\", err, exc_info=True)\n self.head_history = []", "def refresh(cls):\n # Flip the order of the links so that the first URL listed is the\n # highest priority and will take precedence\n for url in current_app.config['MATLAB_DOC_LINKS'][::-1]:\n resp = requests.get(url)\n soup = BeautifulSoup(resp.text, 'html.parser')\n\n terms = soup.findAll('td', {'class': 'term'})\n links = [term.find('a') for term in terms]\n\n for link in links:\n\n function = link.text.rstrip()\n\n doc = cls.query.filter_by(name=function).first()\n doc_url = urljoin(url, link['href'])\n\n # Create an entry if one doesn't already exist\n if doc is None:\n doc = cls(name=function)\n\n doc.link = doc_url\n doc.save()\n\n # Make sure to remove i and j entries\n toremove = cls.query.filter(or_(cls.name == 'i', cls.name == 'j')).all()\n for item in toremove:\n item.delete()\n\n return cls.query.all()", "def reindex(self):\n if self.channels is None:\n 
return\n\n self.data = None\n\n keep_indices = self.channels.new_indices_in_old()\n self.channels.reindex()\n\n if self.parms is not None:\n self.parms = self.integration.get_dependents(\n self.get_config_name())\n\n channel_attributes = self.channel_dependent_attributes\n\n for attribute, value in self.__dict__.items():\n if attribute not in channel_attributes:\n continue\n if not isinstance(value, np.ndarray):\n continue\n setattr(self, attribute, value[keep_indices])", "def fill_from_cache(self):\n move_count = min(\n len(self._replacement_cache),\n constants.K - len(self._contacts)\n )\n\n for _ in range(move_count):\n self.add_contact(self._replacement_cache.pop())", "def reload(self):\n # type: () -> None\n parsed_requirements = self.parse()\n self.requirements = parsed_requirements[0]\n self.index_urls = parsed_requirements[1]\n self.nested_cfiles = parsed_requirements[2]\n self.nested_rfiles = parsed_requirements[3]", "def process_request(self, request):\n super(HistoryChangesetMiddleware, self).process_request(request)\n if request.META.get('REQUEST_METHOD') in ('GET', 'HEAD'):\n return\n request.changeset = None\n request.close_changeset = False\n # Default is to update cached objects as they are modified\n request.delay_cache = False\n\n changeset_id = request.GET.get('use_changeset')\n if changeset_id:\n changeset = Changeset.objects.get(id=changeset_id)\n if changeset.user != request.user:\n message = (\n 'Changeset %s has a different user.' % changeset_id)\n return self.bad_request(request, message)\n if changeset.closed:\n message = 'Changeset %s is closed.' % changeset_id\n return self.bad_request(request, message)\n request.changeset = changeset\n # Wait until changeset is manually closed to schedule cache updates\n request.delay_cache = True", "def reset(self):\n self.history = []\n self.frame = {}\n self.params = {}\n self.form = {}", "def _update(self):\n num_new_evals = (self.metamodel.model_evaluations - self._last_rebuild)\n if num_new_evals >= self.rebuild_interval:\n self._built = True\n self._last_rebuild = self.metamodel.model_evaluations\n\n # Rebuild relevance function and make it usable on arrays.\n self._relevance_function = self._construct_relevance_function()\n rel_fun = np.vectorize(self._relevance_function)\n\n # Learn relevance prediction model\n data = self.metamodel.history.get_model_evaluations()\n relevance_values = rel_fun(data[:, -1])\n self._predictor.fit(data[:, :-1], relevance_values)\n return", "def refresh_history(self):\n\n self.old_jobs = self.secretary_bot.history_bullshit_filter(self.old_jobs)\n self.jobs_save(self.old_jobs, 'overwrite')", "def resequence(self):\n self.history.sort(key=lambda x: x[0])\n self.reset()\n for key, attribute in self.history:\n self.set_current(attribute)\n self.latest = key", "def reindex(self):", "def reindex(self):", "def reBuild(self): # redefine the rebuild method for loss function (polymorphism)\n self.updateRange()\n self.buildLine()\n self.normalize() # normalize loss function to have total area of 1 ", "def _invalidate_branch_cache(self):\n self._cached_overlapping_branch_list = None", "def __build_history(self, obj: Object) -> dict:\n previous_history = dict(obj.history)\n return {**previous_history, self.__get_timestamp(): {'type_id': obj.type_id, 'redshift': obj.redshift}}", "def __build_history(self, obj: Object) -> dict:\n previous_history = obj.history\n return {**previous_history, self.__get_timestamp(): {'type_id': obj.type_id, 'redshift': obj.redshift}}", "def _refresh_buffers(self) -> None:" ]
[ "0.5566847", "0.546248", "0.54090655", "0.5392603", "0.5335858", "0.5311608", "0.52756536", "0.52722096", "0.52640605", "0.5223405", "0.5222167", "0.5147405", "0.5122049", "0.5031895", "0.50197256", "0.5009398", "0.5008371", "0.49860406", "0.49698722", "0.4964786", "0.49647814", "0.4940484", "0.49188736", "0.4905431", "0.4905431", "0.48873168", "0.48682427", "0.4848128", "0.48440447", "0.48283798" ]
0.67901736
0
check to see whether an id is for a group
def is_group(id):
    return id.startswith('G')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_uuid(self, obj, groupid):\n if self.get_uuid(obj) == groupid:\n return True", "def alreay_in_group(self,uid,group_id):\n uid = str(uid)\n user_group_list = self.get_group_list_via_uid(uid)\n return True if group_id in user_group_list else False", "def is_group(self, group_name):\n\n return group_name in self._group", "def isValidGroup(expense_group_id, cursor):\n query = \"\"\"\n SELECT * FROM expense_group WHERE id = ?\n \"\"\"\n cursor.execute(query, (expense_group_id,))\n return len(cursor.fetchall()) == 1", "def isSetId(self):\n return _libsbml.Group_isSetId(self)", "def test_groups_group_id_get(self):\n pass", "def is_in_group_user_id(user_id, group_name):\n try:\n return Group.objects.get(name=group_name).user_set.filter(id=user_id).exists()\n except Group.DoesNotExist:\n return None", "def is_in_group(self, group):\n return group in self.get_all_groups()", "def what_is(self, _id):\n for g in self.groups:\n if _id in self.h_group_ids[g]:\n return g\n return None", "def IsObjectInGroup(object_id, group_name=None):\n rhobj = rhutil.coercerhinoobject(object_id, True, True)\n count = rhobj.GroupCount\n if count<1: return False\n if not group_name: return True\n index = scriptcontext.doc.Groups.Find(group_name, True)\n if index<0: raise ValueError(\"%s group does not exist\"%group_name)\n group_ids = rhobj.GetGroupList()\n for id in group_ids:\n if id==index: return True\n return False", "def _check(isamAppliance, id=None):\n ret_obj = get_all(isamAppliance)\n\n if id != None:\n for groups in ret_obj['data']:\n if groups['id'] == id:\n return True\n\n return False", "def get_group_values(self, group_id:int, group_name:str) -> bool:\n try:\n value_list = self.cursor.execute(f\"SELECT id, name FROM {table_groups} WHERE id={group_id};\").fetchone()\n if not value_list:\n return False\n group_used_id, group_used_name = value_list\n if group_used_name != group_name:\n self.cursor.execute(f\"UPDATE {table_groups} SET name={group_name} WHERE id={group_used_id};\")\n self.connection.commit()\n return True\n except Exception as e:\n msg = f\"We faced problems with checking of the group prensence. Mistake: {e}\"\n self.proceed_error(msg)\n return False", "def is_in_group(user, group_name):\n return is_in_group_user_id(user.id, group_name)", "def is_group(obj) -> bool:\n return hasattr(obj, IOConstants.GROUP_ATTR_NAME)", "def _is_in_group(user, group_name):\n try:\n return Group.objects.get(name=group_name).user_set.filter(id=user.id).exists()\n except Group.DoesNotExist:\n return None", "def group_exists(self):\n return AzureTools().group_exists(names.group_name(self))", "def in_group(self, group):\n\n return self.secondary_groups.filter(\n groups_users.c.group_id == group.id).count() > 0", "def has_group(group, user, request):\n return group_names[group] in groupfinder(user.username, request)", "def check_presence_groups(self, id_user:int) -> bool:\n try:\n value_list = self.cursor.execute(f\"SELECT * FROM {table_users_groups} where id_user={id_user};\").fetchone()\n if value_list:\n return True\n return False\n except Exception as e:\n msg = f\"We faced problems with checking the groups for users. 
Error: {e}\"\n self.proceed_error(msg)\n return False", "def is_in_group(user, group_name):\n return Group.objects.get(name=group_name).user_set.filter(id=user.id).exists()", "def _group_matcher(group):\n return (group.uuid == _DB_UUID and\n group.name == _INST_GROUP_DB['name'] and\n group.user_id == _INST_GROUP_DB['user_id'] and\n group.project_id == _INST_GROUP_DB['project_id'] and\n group.created_at == _TS_NOW and\n group.updated_at == _TS_NOW and\n group.members == _INST_GROUP_DB['members'] and\n group.policies == [_INST_GROUP_DB['policy']['policy']] and\n group.id == 1)", "def is_group(group_name):\n\n try:\n r_json = requests.get(\n 'https://api.rozklad.org.ua/v2/groups/{}'.format(group_name)).json()\n message_text = r_json['message']\n if message_text == 'Ok':\n return True\n elif message_text == 'Group not found':\n return False\n else:\n logger.error(message_text)\n except ConnectionError as error_text:\n logger.error(error_text)\n except IndexError as error_text:\n logger.error(error_text)", "def check_id(self, id):", "def test_groups_group_id_state_get(self):\n pass", "def check_ldap_group_existence(group_id):\n endpoint = f\"/identities/groups/{group_id}\"\n http_response = call_rest_api(endpoint, \"head\", **config.DEFAULT_REST_KWARGS)\n if http_response.status_code == 200: # 200 = 'OK. Group exists.'\n return True\n return False", "def check_group(self, groupid, scenegroup):\n if self.find_with_uuid(groupid, bpy.data.objects, \"objects\"):\n self._found[\"objects\"] += 1\n self._total_server[\"objects\"] += 1\n if self.find_with_uuid(scenegroup[\"asset\"], bpy.data.meshes, \"meshes\"):\n self._found[\"meshes\"] += 1\n self._total_server[\"meshes\"] += 1", "def has_group(self, resolvable):\n group = self._resolve_group(resolvable)\n\n for g in self.groups.query(name=group.name):\n if g.name == group.name:\n return True\n\n return False", "def is_group(g, node):\n if node not in g.nodes():\n print('Not a node in the graph')\n return False\n elif g.node[node]['type'] == 'group':\n return True\n else:\n return False", "def _check_group(group):\n filled_cells = Sudoku._filter_empty(group)\n return utils.is_unique(filled_cells)", "def is_user_in_group(user, group):\n users = group.get_users()\n if user in users:\n return True\n return False" ]
[ "0.7496174", "0.7397895", "0.7248163", "0.72468346", "0.7207925", "0.7201284", "0.71829623", "0.715947", "0.7065384", "0.70614374", "0.6950488", "0.69323575", "0.68989813", "0.6898132", "0.686232", "0.6849973", "0.682175", "0.68139756", "0.6812948", "0.6809037", "0.6806396", "0.6740182", "0.67214423", "0.6714532", "0.66386086", "0.6593438", "0.6543826", "0.6539853", "0.65378433", "0.65290093" ]
0.81725055
0